status (string, 1 class) | repo_name (string, 31 classes) | repo_url (string, 31 classes) | issue_id (int64, 1–104k) | title (string, length 4–369) | body (string, length 0–254k, nullable ⌀) | issue_url (string, length 37–56) | pull_url (string, length 37–54) | before_fix_sha (string, length 40) | after_fix_sha (string, length 40) | report_datetime (timestamp[us, tz=UTC]) | language (string, 5 classes) | commit_datetime (timestamp[us, tz=UTC]) | updated_file (string, length 4–188) | file_content (string, length 0–5.12M)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
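As a quick orientation, here is a minimal sketch of reading rows with this schema through the `datasets` library (the dataset's actual name/path is not given here, so the argument below is a placeholder):

```python
from datasets import load_dataset

# Placeholder path; substitute the real dataset name or a local path.
ds = load_dataset("path/to/this-dataset", split="train")

row = ds[0]
print(row["repo_name"], row["issue_id"], row["updated_file"])
print((row["body"] or "")[:200])  # body is nullable (⌀) for some rows
```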
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,577 |
pytest 7 test failures
|
### Summary
In Fedora, we are testing the impact of pytest 7.0.0rc1 on our packages. Both `ansible` 2.9.27 and `ansible-core` 2.12.1 fail with various errors.
ansible:
= 1836 failed, 8451 passed, 711 skipped, 7 warnings, 12 errors in 665.67s (0:11:05) =
ansible-core:
=========== 25 failed, 1768 passed, 11 skipped, 7 warnings in 49.19s ===========
### Issue Type
Bug Report
### Component Name
test?
### Ansible Version
```console
2.9.27
```
### Configuration
```console
N/A
```
### OS / Environment
Fedora Linux 36, Python 3.10.1, pytest 7.0.0rc1
### Steps to Reproduce
In Fedora, we build the packages in this copr: https://copr.fedorainfracloud.org/coprs/churchyard/pytest-7/
Unfortunately, I don't know Ansible well or how you run the tests, but the failures should be reproducible if you install a pre-release pytest into the environment.
### Expected Results
The tests should pass, as they do with pytest 6.2.5.
### Actual Results
- Actual results for ansible: [ansible-builder-live.log.gz](https://github.com/ansible/ansible/files/7734616/ansible-builder-live.log.gz)
- Actual results for ansible-core: [ansible-core-builder-live.log.gz](https://github.com/ansible/ansible/files/7734617/ansible-core-builder-live.log.gz)
Also available at https://copr.fedorainfracloud.org/coprs/churchyard/pytest-7/package/ansible/ and https://copr.fedorainfracloud.org/coprs/churchyard/pytest-7/package/ansible-core/
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76577
|
https://github.com/ansible/ansible/pull/76670
|
b30b8fb79c64535ba920d8359d32be50ce1b1b25
|
aaa7944b0238842980898dae59ddb9dcffed09cc
| 2021-12-17T11:07:35Z |
python
| 2022-01-07T23:53:44Z |
test/units/galaxy/test_role_install.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pytest
import tempfile
from io import StringIO
from ansible import context
from ansible.cli.galaxy import GalaxyCLI
from ansible.galaxy import api, role, Galaxy
from ansible.module_utils._text import to_text
from ansible.utils import context_objects as co
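# GlobalCLIArgs is a process-wide singleton; the helper below stashes and
# restores it so that invoking the galaxy CLI does not leak parsed arguments
# into other tests.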
def call_galaxy_cli(args):
orig = co.GlobalCLIArgs._Singleton__instance
co.GlobalCLIArgs._Singleton__instance = None
try:
GalaxyCLI(args=['ansible-galaxy', 'role'] + args).run()
finally:
co.GlobalCLIArgs._Singleton__instance = orig
@pytest.fixture(autouse=True)
def reset_cli_args():
co.GlobalCLIArgs._Singleton__instance = None
yield
co.GlobalCLIArgs._Singleton__instance = None
@pytest.fixture(autouse=True)
def galaxy_server():
context.CLIARGS._store = {'ignore_certs': False}
galaxy_api = api.GalaxyAPI(None, 'test_server', 'https://galaxy.ansible.com')
return galaxy_api
@pytest.fixture(autouse=True)
def init_role_dir(tmp_path_factory):
test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Roles Input'))
namespace = 'ansible_namespace'
role = 'role'
skeleton_path = os.path.join(os.path.dirname(os.path.split(__file__)[0]), 'cli', 'test_data', 'role_skeleton')
call_galaxy_cli(['init', '%s.%s' % (namespace, role), '-c', '--init-path', test_dir, '--role-skeleton', skeleton_path])
def mock_NamedTemporaryFile(mocker, **kwargs):
mock_ntf = mocker.MagicMock()
mock_ntf.write = mocker.MagicMock()
mock_ntf.close = mocker.MagicMock()
mock_ntf.name = None
return mock_ntf
@pytest.fixture(autouse=True)
def init_test(mocker, monkeypatch):
# Inject the mocker fixture via a lambda; patching mock_NamedTemporaryFile in
# directly would leave its 'mocker' parameter unfilled when role code calls
# tempfile.NamedTemporaryFile().
monkeypatch.setattr(tempfile, 'NamedTemporaryFile', lambda *args, **kwargs: mock_NamedTemporaryFile(mocker, **kwargs))
@pytest.fixture(autouse=True)
def mock_role_download_api(mocker, monkeypatch):
mock_role_api = mocker.MagicMock()
mock_role_api.side_effect = [
StringIO(u''),
]
monkeypatch.setattr(role, 'open_url', mock_role_api)
return mock_role_api
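# Each test below primes api.open_url with three JSON payloads, in the order
# the Galaxy client requests them: the available API versions, the role lookup
# by owner/name, and the role's version list. The mock_role_download_api
# fixture above then captures the tarball download URL that the tests assert on.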
def test_role_download_github(mocker, galaxy_server, mock_role_download_api, monkeypatch):
mock_api = mocker.MagicMock()
mock_api.side_effect = [
StringIO(u'{"available_versions":{"v1":"v1/"}}'),
StringIO(u'{"results":[{"id":"123","github_user":"test_owner","github_repo": "test_role"}]}'),
StringIO(u'{"results":[{"name": "0.0.1"},{"name": "0.0.2"}]}'),
]
monkeypatch.setattr(api, 'open_url', mock_api)
role.GalaxyRole(Galaxy(), galaxy_server, 'test_owner.test_role', version="0.0.1").install()
assert mock_role_download_api.call_count == 1
assert mock_role_download_api.mock_calls[0][1][0] == 'https://github.com/test_owner/test_role/archive/0.0.1.tar.gz'
def test_role_download_github_default_version(mocker, galaxy_server, mock_role_download_api, monkeypatch):
mock_api = mocker.MagicMock()
mock_api.side_effect = [
StringIO(u'{"available_versions":{"v1":"v1/"}}'),
StringIO(u'{"results":[{"id":"123","github_user":"test_owner","github_repo": "test_role"}]}'),
StringIO(u'{"results":[{"name": "0.0.1"},{"name": "0.0.2"}]}'),
]
monkeypatch.setattr(api, 'open_url', mock_api)
role.GalaxyRole(Galaxy(), galaxy_server, 'test_owner.test_role').install()
assert mock_role_download_api.call_count == 1
assert mock_role_download_api.mock_calls[0][1][0] == 'https://github.com/test_owner/test_role/archive/0.0.2.tar.gz'
def test_role_download_github_no_download_url_for_version(mocker, galaxy_server, mock_role_download_api, monkeypatch):
mock_api = mocker.MagicMock()
mock_api.side_effect = [
StringIO(u'{"available_versions":{"v1":"v1/"}}'),
StringIO(u'{"results":[{"id":"123","github_user":"test_owner","github_repo": "test_role"}]}'),
StringIO(u'{"results":[{"name": "0.0.1"},{"name": "0.0.2","download_url":"http://localhost:8080/test_owner/test_role/0.0.2.tar.gz"}]}'),
]
monkeypatch.setattr(api, 'open_url', mock_api)
role.GalaxyRole(Galaxy(), galaxy_server, 'test_owner.test_role', version="0.0.1").install()
assert mock_role_download_api.call_count == 1
assert mock_role_download_api.mock_calls[0][1][0] == 'https://github.com/test_owner/test_role/archive/0.0.1.tar.gz'
def test_role_download_url(mocker, galaxy_server, mock_role_download_api, monkeypatch):
mock_api = mocker.MagicMock()
mock_api.side_effect = [
StringIO(u'{"available_versions":{"v1":"v1/"}}'),
StringIO(u'{"results":[{"id":"123","github_user":"test_owner","github_repo": "test_role"}]}'),
StringIO(u'{"results":[{"name": "0.0.1","download_url":"http://localhost:8080/test_owner/test_role/0.0.1.tar.gz"},'
u'{"name": "0.0.2","download_url":"http://localhost:8080/test_owner/test_role/0.0.2.tar.gz"}]}'),
]
monkeypatch.setattr(api, 'open_url', mock_api)
role.GalaxyRole(Galaxy(), galaxy_server, 'test_owner.test_role', version="0.0.1").install()
assert mock_role_download_api.call_count == 1
assert mock_role_download_api.mock_calls[0][1][0] == 'http://localhost:8080/test_owner/test_role/0.0.1.tar.gz'
def test_role_download_url_default_version(mocker, galaxy_server, mock_role_download_api, monkeypatch):
mock_api = mocker.MagicMock()
mock_api.side_effect = [
StringIO(u'{"available_versions":{"v1":"v1/"}}'),
StringIO(u'{"results":[{"id":"123","github_user":"test_owner","github_repo": "test_role"}]}'),
StringIO(u'{"results":[{"name": "0.0.1","download_url":"http://localhost:8080/test_owner/test_role/0.0.1.tar.gz"},'
u'{"name": "0.0.2","download_url":"http://localhost:8080/test_owner/test_role/0.0.2.tar.gz"}]}'),
]
monkeypatch.setattr(api, 'open_url', mock_api)
role.GalaxyRole(Galaxy(), galaxy_server, 'test_owner.test_role').install()
assert mock_role_download_api.call_count == 1
assert mock_role_download_api.mock_calls[0][1][0] == 'http://localhost:8080/test_owner/test_role/0.0.2.tar.gz'
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 59,206 |
Ansible generates a bad SSH commandline when there's a space in the 'control_path'
|
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and devel branch are affected too -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
When, for any reason, the .ansible/ directory lives inside a path **containing whitespace**, the default-generated 'control_path' is invalid, and so is the resulting SSH command line.
Adding quotes around the 'control_path' inside 'ansible/plugins/connection/ssh.py' fixes the bug.
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
Connection plugin
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes -->
```
ansible 2.8.2
config file = /Users/pposca/proyectos/kipeos-devops/ansible.cfg
configured module search path = ['/Users/pposca/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/pposca/.virtualenvs/kipeos-devops/lib/python3.7/site-packages/ansible
executable location = /Users/pposca/.virtualenvs/kipeos-devops/bin/ansible
python version = 3.7.3 (default, Jul 7 2019, 18:15:21) [Clang 10.0.1 (clang-1001.0.46.4)]
```
##### CONFIGURATION
<!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes -->
```
DEFAULT_HOST_LIST(/Users/pposca/proyectos/kipeos-devops/ansible.cfg) = ['/Users/pposca/proyectos/kipeos-devops/hosts']
INTERPRETER_PYTHON(/Users/pposca/proyectos/kipeos-devops/ansible.cfg) = auto
```
##### OS / ENVIRONMENT
- MacOS 10.14.5
- Mackup (so .ansible/ is a symlink to a directory with whitespaces in its path)
- Python 3.7.3
##### STEPS TO REPRODUCE
With .ansible inside a directory whose path contains whitespace, run:
```
ansible all -m ping
```
##### EXPECTED RESULTS
```
mydomain.com | SUCCESS => {
"ansible_facts": {
"discovered_interpreter_python": "/usr/bin/python3"
},
"changed": false,
"ping": "pong"
}
```
##### ACTUAL RESULTS
```
ansible 2.8.2
config file = /Users/pposca/proyectos/kipeos-devops/ansible.cfg
configured module search path = ['/Users/pposca/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/pposca/.virtualenvs/kipeos-devops/lib/python3.7/site-packages/ansible
executable location = /Users/pposca/.virtualenvs/kipeos-devops/bin/ansible
python version = 3.7.3 (default, Jul 7 2019, 18:15:21) [Clang 10.0.1 (clang-1001.0.46.4)]
Using /Users/pposca/proyectos/kipeos-devops/ansible.cfg as config file
setting up inventory plugins
host_list declined parsing /Users/pposca/proyectos/kipeos-devops/hosts as it did not pass it's verify_file() method
script declined parsing /Users/pposca/proyectos/kipeos-devops/hosts as it did not pass it's verify_file() method
auto declined parsing /Users/pposca/proyectos/kipeos-devops/hosts as it did not pass it's verify_file() method
Parsed /Users/pposca/proyectos/kipeos-devops/hosts inventory source with ini plugin
Loading callback plugin minimal of type stdout, v2.0 from /Users/pposca/.virtualenvs/kipeos-devops/lib/python3.7/site-packages/ansible/plugins/callback/minimal.py
META: ran handlers
<mydomain.com> ESTABLISH SSH CONNECTION FOR USER: None
<mydomain.com> SSH: EXEC ssh -vvv -C -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o 'ControlPath=/Volumes/Apple HDD/VDrive/Dropbox/Mackup/.ansible/cp/fc95ded756' mydomain.com '/bin/sh -c '"'"'echo ~ && sleep 0'"'"''
<mydomain.com> (255, b'', b'command-line line 0: garbage at end of line; "HDD/VDrive/Dropbox/Mackup/.ansible/cp/fc95ded756".\r\n')
mydomain.com | UNREACHABLE! => {
"changed": false,
"msg": "Failed to connect to the host via ssh: command-line line 0: garbage at end of line; \"HDD/VDrive/Dropbox/Mackup/.ansible/cp/fc95ded756\".",
"unreachable": true
}
```
##### VALUE OF THE VARIABLE 'b_args'
Note the whitespace after 'Apple':
```
(b'-o', b'ControlPath=/Volumes/Apple HDD/VDrive/Dropbox/Mackup/.ansible/cp/fc95ded756')
```
##### RELATED ISSUES
#7713 #31916
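As a minimal illustrative sketch of the failure mode (not the actual patch from the linked PR): Ansible passes the option as a single argv element, but OpenSSH re-tokenizes each `-o` value with its own config parser, so an unquoted space splits the directive.

```python
control_path = "/Volumes/Apple HDD/VDrive/Dropbox/Mackup/.ansible/cp/fc95ded756"

# One argv element, yet ssh's own tokenizer splits the -o value on the space
# and reports "garbage at end of line":
bad = ["-o", "ControlPath=%s" % control_path]

# Quoting the value inside the option string keeps ssh's tokenizer happy:
good = ["-o", 'ControlPath="%s"' % control_path]

print(bad)
print(good)
```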
|
https://github.com/ansible/ansible/issues/59206
|
https://github.com/ansible/ansible/pull/76424
|
0ef5274a3c6189e8fa6a7d97993c165ab548fe95
|
aa022dba2d141cbd3b862767400ba4f714a9edd1
| 2019-07-17T22:03:30Z |
python
| 2022-01-10T17:29:35Z |
changelogs/fragments/ssh_quote_cp.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 59,206 |
Ansible generates a bad SSH commandline when there's a space in the 'control_path'
|
|
https://github.com/ansible/ansible/issues/59206
|
https://github.com/ansible/ansible/pull/76424
|
0ef5274a3c6189e8fa6a7d97993c165ab548fe95
|
aa022dba2d141cbd3b862767400ba4f714a9edd1
| 2019-07-17T22:03:30Z |
python
| 2022-01-10T17:29:35Z |
lib/ansible/plugins/connection/ssh.py
|
# Copyright (c) 2012, Michael DeHaan <[email protected]>
# Copyright 2015 Abhijit Menon-Sen <[email protected]>
# Copyright 2017 Toshio Kuratomi <[email protected]>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: ssh
short_description: connect via SSH client binary
description:
- This connection plugin allows Ansible to communicate to the target machines through normal SSH command line.
- Ansible does not expose a channel to allow communication between the user and the SSH process to accept
a password manually to decrypt an SSH key when using this connection plugin (which is the default). The
use of C(ssh-agent) is highly recommended.
author: ansible (@core)
extends_documentation_fragment:
- connection_pipelining
version_added: historical
notes:
- Many options default to C(None) here but that only means we do not override the SSH tool's defaults and/or configuration.
For example, if you specify the port in this plugin it will override any C(Port) entry in your C(.ssh/config).
options:
host:
description: Hostname/IP to connect to.
vars:
- name: inventory_hostname
- name: ansible_host
- name: ansible_ssh_host
- name: delegated_vars['ansible_host']
- name: delegated_vars['ansible_ssh_host']
host_key_checking:
description: Determines if SSH should check host keys.
default: True
type: boolean
ini:
- section: defaults
key: 'host_key_checking'
- section: ssh_connection
key: 'host_key_checking'
version_added: '2.5'
env:
- name: ANSIBLE_HOST_KEY_CHECKING
- name: ANSIBLE_SSH_HOST_KEY_CHECKING
version_added: '2.5'
vars:
- name: ansible_host_key_checking
version_added: '2.5'
- name: ansible_ssh_host_key_checking
version_added: '2.5'
password:
description: Authentication password for the C(remote_user). Can be supplied as CLI option.
vars:
- name: ansible_password
- name: ansible_ssh_pass
- name: ansible_ssh_password
sshpass_prompt:
description:
- Password prompt that sshpass should search for. Supported by sshpass 1.06 and up.
- Defaults to C(Enter PIN for) when pkcs11_provider is set.
default: ''
ini:
- section: 'ssh_connection'
key: 'sshpass_prompt'
env:
- name: ANSIBLE_SSHPASS_PROMPT
vars:
- name: ansible_sshpass_prompt
version_added: '2.10'
ssh_args:
description: Arguments to pass to all SSH CLI tools.
default: '-C -o ControlMaster=auto -o ControlPersist=60s'
ini:
- section: 'ssh_connection'
key: 'ssh_args'
env:
- name: ANSIBLE_SSH_ARGS
vars:
- name: ansible_ssh_args
version_added: '2.7'
ssh_common_args:
description: Common extra args for all SSH CLI tools.
ini:
- section: 'ssh_connection'
key: 'ssh_common_args'
version_added: '2.7'
env:
- name: ANSIBLE_SSH_COMMON_ARGS
version_added: '2.7'
vars:
- name: ansible_ssh_common_args
cli:
- name: ssh_common_args
default: ''
ssh_executable:
default: ssh
description:
- This defines the location of the SSH binary. It defaults to C(ssh) which will use the first SSH binary available in $PATH.
- This option is usually not required; it might be useful when access to system SSH is restricted,
or when using SSH wrappers to connect to remote hosts.
env: [{name: ANSIBLE_SSH_EXECUTABLE}]
ini:
- {key: ssh_executable, section: ssh_connection}
#const: ANSIBLE_SSH_EXECUTABLE
version_added: "2.2"
vars:
- name: ansible_ssh_executable
version_added: '2.7'
sftp_executable:
default: sftp
description:
- This defines the location of the sftp binary. It defaults to C(sftp) which will use the first binary available in $PATH.
env: [{name: ANSIBLE_SFTP_EXECUTABLE}]
ini:
- {key: sftp_executable, section: ssh_connection}
version_added: "2.6"
vars:
- name: ansible_sftp_executable
version_added: '2.7'
scp_executable:
default: scp
description:
- This defines the location of the scp binary. It defaults to C(scp) which will use the first binary available in $PATH.
env: [{name: ANSIBLE_SCP_EXECUTABLE}]
ini:
- {key: scp_executable, section: ssh_connection}
version_added: "2.6"
vars:
- name: ansible_scp_executable
version_added: '2.7'
scp_extra_args:
description: Extra arguments exclusive to the C(scp) CLI.
vars:
- name: ansible_scp_extra_args
env:
- name: ANSIBLE_SCP_EXTRA_ARGS
version_added: '2.7'
ini:
- key: scp_extra_args
section: ssh_connection
version_added: '2.7'
cli:
- name: scp_extra_args
default: ''
sftp_extra_args:
description: Extra arguments exclusive to the C(sftp) CLI.
vars:
- name: ansible_sftp_extra_args
env:
- name: ANSIBLE_SFTP_EXTRA_ARGS
version_added: '2.7'
ini:
- key: sftp_extra_args
section: ssh_connection
version_added: '2.7'
cli:
- name: sftp_extra_args
default: ''
ssh_extra_args:
description: Extra arguments exclusive to the SSH CLI.
vars:
- name: ansible_ssh_extra_args
env:
- name: ANSIBLE_SSH_EXTRA_ARGS
version_added: '2.7'
ini:
- key: ssh_extra_args
section: ssh_connection
version_added: '2.7'
cli:
- name: ssh_extra_args
default: ''
reconnection_retries:
description: Number of attempts to connect.
default: 0
type: integer
env:
- name: ANSIBLE_SSH_RETRIES
ini:
- section: connection
key: retries
- section: ssh_connection
key: retries
vars:
- name: ansible_ssh_retries
version_added: '2.7'
port:
description: Remote port to connect to.
type: int
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_port
- name: ansible_ssh_port
remote_user:
description:
- User name with which to login to the remote server, normally set by the remote_user keyword.
- If no user is supplied, Ansible will let the SSH client binary choose the user as it normally would.
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
- name: ansible_ssh_user
cli:
- name: user
pipelining:
env:
- name: ANSIBLE_PIPELINING
- name: ANSIBLE_SSH_PIPELINING
ini:
- section: defaults
key: pipelining
- section: connection
key: pipelining
- section: ssh_connection
key: pipelining
vars:
- name: ansible_pipelining
- name: ansible_ssh_pipelining
private_key_file:
description:
- Path to private key file to use for authentication.
ini:
- section: defaults
key: private_key_file
env:
- name: ANSIBLE_PRIVATE_KEY_FILE
vars:
- name: ansible_private_key_file
- name: ansible_ssh_private_key_file
cli:
- name: private_key_file
option: '--private-key'
control_path:
description:
- This is the location to save SSH's ControlPath sockets; it uses SSH's variable substitution.
- Since 2.3, if null (default), ansible will generate a unique hash. Use ``%(directory)s`` to indicate where to use the control dir path setting.
- Before 2.3 it defaulted to ``control_path=%(directory)s/ansible-ssh-%%h-%%p-%%r``.
- Be aware that this setting is ignored if C(-o ControlPath) is set in ssh args.
env:
- name: ANSIBLE_SSH_CONTROL_PATH
ini:
- key: control_path
section: ssh_connection
vars:
- name: ansible_control_path
version_added: '2.7'
control_path_dir:
default: ~/.ansible/cp
description:
- This sets the directory to use for ssh control path if the control path setting is null.
- It also provides the ``%(directory)s`` variable for the control path setting.
env:
- name: ANSIBLE_SSH_CONTROL_PATH_DIR
ini:
- section: ssh_connection
key: control_path_dir
vars:
- name: ansible_control_path_dir
version_added: '2.7'
sftp_batch_mode:
default: 'yes'
description: 'TODO: write it'
env: [{name: ANSIBLE_SFTP_BATCH_MODE}]
ini:
- {key: sftp_batch_mode, section: ssh_connection}
type: bool
vars:
- name: ansible_sftp_batch_mode
version_added: '2.7'
ssh_transfer_method:
description:
- "Preferred method to use when transferring files over ssh"
- Setting to 'smart' (default) will try them in order, until one succeeds or they all fail
- Using 'piped' creates an ssh pipe with C(dd) on either side to copy the data
choices: ['sftp', 'scp', 'piped', 'smart']
env: [{name: ANSIBLE_SSH_TRANSFER_METHOD}]
ini:
- {key: transfer_method, section: ssh_connection}
vars:
- name: ansible_ssh_transfer_method
version_added: '2.12'
scp_if_ssh:
deprecated:
why: In favor of the "ssh_transfer_method" option.
version: "2.17"
alternatives: ssh_transfer_method
default: smart
description:
- "Preferred method to use when transfering files over SSH."
- When set to I(smart), Ansible will try them until one succeeds or they all fail.
- If set to I(True), it will force 'scp'; if I(False), it will use 'sftp'.
- This setting will be overridden by ssh_transfer_method if that is set.
env: [{name: ANSIBLE_SCP_IF_SSH}]
ini:
- {key: scp_if_ssh, section: ssh_connection}
vars:
- name: ansible_scp_if_ssh
version_added: '2.7'
use_tty:
version_added: '2.5'
default: 'yes'
description: add -tt to ssh commands to force tty allocation.
env: [{name: ANSIBLE_SSH_USETTY}]
ini:
- {key: usetty, section: ssh_connection}
type: bool
vars:
- name: ansible_ssh_use_tty
version_added: '2.7'
timeout:
default: 10
description:
- This is the default amount of time we will wait while establishing an SSH connection.
- It also controls how long we can wait before reading from the connection once it is established (select on the socket).
env:
- name: ANSIBLE_TIMEOUT
- name: ANSIBLE_SSH_TIMEOUT
version_added: '2.11'
ini:
- key: timeout
section: defaults
- key: timeout
section: ssh_connection
version_added: '2.11'
vars:
- name: ansible_ssh_timeout
version_added: '2.11'
cli:
- name: timeout
type: integer
pkcs11_provider:
version_added: '2.12'
default: ""
description:
- "PKCS11 SmartCard provider such as opensc, example: /usr/local/lib/opensc-pkcs11.so"
- Requires sshpass version 1.06+; sshpass must support the -P option.
env: [{name: ANSIBLE_PKCS11_PROVIDER}]
ini:
- {key: pkcs11_provider, section: ssh_connection}
vars:
- name: ansible_ssh_pkcs11_provider
'''
import errno
import fcntl
import hashlib
import os
import pty
import re
import shlex
import subprocess
import time
from functools import wraps
from ansible import constants as C
from ansible.errors import (
AnsibleAuthenticationFailure,
AnsibleConnectionFailure,
AnsibleError,
AnsibleFileNotFound,
)
from ansible.errors import AnsibleOptionsError
from ansible.module_utils.compat import selectors
from ansible.module_utils.six import PY3, text_type, binary_type
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.parsing.convert_bool import BOOLEANS, boolean
from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.plugins.shell.powershell import _parse_clixml
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath, makedirs_safe
display = Display()
b_NOT_SSH_ERRORS = (b'Traceback (most recent call last):', # Python-2.6 when there's an exception
# while invoking a script via -m
b'PHP Parse error:', # Php always returns error 255
)
SSHPASS_AVAILABLE = None
class AnsibleControlPersistBrokenPipeError(AnsibleError):
''' ControlPersist broken pipe '''
pass
def _handle_error(remaining_retries, command, return_tuple, no_log, host, display=display):
# sshpass errors
if command == b'sshpass':
# Error 5 is invalid/incorrect password. Raise an exception to prevent retries from locking the account.
if return_tuple[0] == 5:
msg = 'Invalid/incorrect username/password. Skipping remaining {0} retries to prevent account lockout:'.format(remaining_retries)
if remaining_retries <= 0:
msg = 'Invalid/incorrect password:'
if no_log:
msg = '{0} <error censored due to no log>'.format(msg)
else:
msg = '{0} {1}'.format(msg, to_native(return_tuple[2]).rstrip())
raise AnsibleAuthenticationFailure(msg)
# sshpass return codes are 1-6. We handled 5 above, so this catches other scenarios.
# No exception is raised, so the connection is retried - except when attempting to use
# sshpass_prompt with an sshpass that won't let us pass -P, in which case we fail loudly.
elif return_tuple[0] in [1, 2, 3, 4, 6]:
msg = 'sshpass error:'
if no_log:
msg = '{0} <error censored due to no log>'.format(msg)
else:
details = to_native(return_tuple[2]).rstrip()
if "sshpass: invalid option -- 'P'" in details:
details = 'Installed sshpass version does not support customized password prompts. ' \
'Upgrade sshpass to use sshpass_prompt, or otherwise switch to ssh keys.'
raise AnsibleError('{0} {1}'.format(msg, details))
msg = '{0} {1}'.format(msg, details)
if return_tuple[0] == 255:
SSH_ERROR = True
for signature in b_NOT_SSH_ERRORS:
if signature in return_tuple[1]:
SSH_ERROR = False
break
if SSH_ERROR:
msg = "Failed to connect to the host via ssh:"
if no_log:
msg = '{0} <error censored due to no log>'.format(msg)
else:
msg = '{0} {1}'.format(msg, to_native(return_tuple[2]).rstrip())
raise AnsibleConnectionFailure(msg)
# For other errors, no exception is raised so the connection is retried and we only log the messages
if 1 <= return_tuple[0] <= 254:
msg = u"Failed to connect to the host via ssh:"
if no_log:
msg = u'{0} <error censored due to no log>'.format(msg)
else:
msg = u'{0} {1}'.format(msg, to_text(return_tuple[2]).rstrip())
display.vvv(msg, host=host)
def _ssh_retry(func):
"""
Decorator to retry ssh/scp/sftp in the case of a connection failure
Will retry if:
* an exception is caught
* ssh returns 255
Will not retry if
* sshpass returns 5 (invalid password, to prevent account lockouts)
* remaining_tries is < 2
* retries limit reached
"""
@wraps(func)
def wrapped(self, *args, **kwargs):
remaining_tries = int(self.get_option('reconnection_retries')) + 1
cmd_summary = u"%s..." % to_text(args[0])
conn_password = self.get_option('password') or self._play_context.password
for attempt in range(remaining_tries):
cmd = args[0]
if attempt != 0 and conn_password and isinstance(cmd, list):
# If this is a retry, the fd/pipe for sshpass is closed, and we need a new one
self.sshpass_pipe = os.pipe()
cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')
try:
try:
return_tuple = func(self, *args, **kwargs)
# TODO: this should come from task
if self._play_context.no_log:
display.vvv(u'rc=%s, stdout and stderr censored due to no log' % return_tuple[0], host=self.host)
else:
display.vvv(return_tuple, host=self.host)
# 0 = success
# 1-254 = remote command return code
# 255 could be a failure from the ssh command itself
except (AnsibleControlPersistBrokenPipeError):
# Retry one more time because of the ControlPersist broken pipe (see #16731)
cmd = args[0]
if conn_password and isinstance(cmd, list):
# This is a retry, so the fd/pipe for sshpass is closed, and we need a new one
self.sshpass_pipe = os.pipe()
cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')
display.vvv(u"RETRYING BECAUSE OF CONTROLPERSIST BROKEN PIPE")
return_tuple = func(self, *args, **kwargs)
remaining_retries = remaining_tries - attempt - 1
_handle_error(remaining_retries, cmd[0], return_tuple, self._play_context.no_log, self.host)
break
# 5 = Invalid/incorrect password from sshpass
except AnsibleAuthenticationFailure:
# Raising this exception, which is subclassed from AnsibleConnectionFailure, prevents further retries
raise
except (AnsibleConnectionFailure, Exception) as e:
if attempt == remaining_tries - 1:
raise
else:
pause = 2 ** attempt - 1
if pause > 30:
pause = 30
if isinstance(e, AnsibleConnectionFailure):
msg = u"ssh_retry: attempt: %d, ssh return code is 255. cmd (%s), pausing for %d seconds" % (attempt + 1, cmd_summary, pause)
else:
msg = (u"ssh_retry: attempt: %d, caught exception(%s) from cmd (%s), "
u"pausing for %d seconds" % (attempt + 1, to_text(e), cmd_summary, pause))
display.vv(msg, host=self.host)
time.sleep(pause)
continue
return return_tuple
return wrapped
class Connection(ConnectionBase):
''' ssh based connections '''
transport = 'ssh'
has_pipelining = True
def __init__(self, *args, **kwargs):
super(Connection, self).__init__(*args, **kwargs)
# TODO: all should come from get_option(), but some might not be set at this point yet
self.host = self._play_context.remote_addr
self.port = self._play_context.port
self.user = self._play_context.remote_user
self.control_path = None
self.control_path_dir = None
# Windows operates differently from a POSIX connection/shell plugin,
# we need to set various properties to ensure SSH on Windows continues
# to work
if getattr(self._shell, "_IS_WINDOWS", False):
self.has_native_async = True
self.always_pipeline_modules = True
self.module_implementation_preferences = ('.ps1', '.exe', '')
self.allow_executable = False
# The connection is created by running ssh/scp/sftp from the exec_command,
# put_file, and fetch_file methods, so we don't need to do any connection
# management here.
def _connect(self):
return self
@staticmethod
def _create_control_path(host, port, user, connection=None, pid=None):
'''Make a hash for the controlpath based on connection attributes'''
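# Yields e.g. '%(directory)s/dd75c176a5' (ten hex digits of the SHA-1);
# the '%(directory)s' placeholder is filled in later from control_path_dir
# in _build_command.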
pstring = '%s-%s-%s' % (host, port, user)
if connection:
pstring += '-%s' % connection
if pid:
pstring += '-%s' % to_text(pid)
m = hashlib.sha1()
m.update(to_bytes(pstring))
digest = m.hexdigest()
cpath = '%(directory)s/' + digest[:10]
return cpath
@staticmethod
def _sshpass_available():
global SSHPASS_AVAILABLE
# We test once if sshpass is available, and remember the result. It
# would be nice to use distutils.spawn.find_executable for this, but
# distutils isn't always available; shutil.which() is Python3-only.
if SSHPASS_AVAILABLE is None:
try:
p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
SSHPASS_AVAILABLE = True
except OSError:
SSHPASS_AVAILABLE = False
return SSHPASS_AVAILABLE
@staticmethod
def _persistence_controls(b_command):
'''
Takes a command array and scans it for ControlPersist and ControlPath
settings and returns two booleans indicating whether either was found.
This could be smarter, e.g. returning false if ControlPersist is 'no',
but for now we do it the simple way.
'''
controlpersist = False
controlpath = False
for b_arg in (a.lower() for a in b_command):
if b'controlpersist' in b_arg:
controlpersist = True
elif b'controlpath' in b_arg:
controlpath = True
return controlpersist, controlpath
def _add_args(self, b_command, b_args, explanation):
"""
Adds arguments to the ssh command and displays a caller-supplied explanation of why.
:arg b_command: A list containing the command to add the new arguments to.
This list will be modified by this method.
:arg b_args: An iterable of new arguments to add. This iterable is used
more than once so it must be persistent (i.e. a list is okay but a
StringIO would not be)
:arg explanation: A text string explaining why the arguments
were added. It will be displayed at a high enough verbosity.
.. note:: This function does its work via side-effect. The b_command list has the new arguments appended.
"""
display.vvvvv(u'SSH: %s: (%s)' % (explanation, ')('.join(to_text(a) for a in b_args)), host=self.host)
b_command += b_args
def _build_command(self, binary, subsystem, *other_args):
'''
Takes an executable (ssh, scp, sftp or wrapper) and optional extra arguments and returns the remote command
wrapped in local ssh shell commands and ready for execution.
:arg binary: actual executable to use to execute command.
:arg subsystem: type of executable provided, ssh/sftp/scp, needed because wrappers for ssh might have different names.
:arg other_args: additional arguments passed through to the ssh binary
'''
b_command = []
conn_password = self.get_option('password') or self._play_context.password
#
# First, the command to invoke
#
# If we want to use password authentication, we have to set up a pipe to
# write the password to sshpass.
pkcs11_provider = self.get_option("pkcs11_provider")
if conn_password or pkcs11_provider:
if not self._sshpass_available():
raise AnsibleError("to use the 'ssh' connection type with passwords or pkcs11_provider, you must install the sshpass program")
if not conn_password and pkcs11_provider:
raise AnsibleError("to use pkcs11_provider you must specify a password/pin")
self.sshpass_pipe = os.pipe()
b_command += [b'sshpass', b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')]
password_prompt = self.get_option('sshpass_prompt')
if not password_prompt and pkcs11_provider:
# Set default password prompt for pkcs11_provider to make it clear it's a PIN
password_prompt = 'Enter PIN for '
if password_prompt:
b_command += [b'-P', to_bytes(password_prompt, errors='surrogate_or_strict')]
b_command += [to_bytes(binary, errors='surrogate_or_strict')]
#
# Next, additional arguments based on the configuration.
#
# pkcs11 mode allows the use of Smartcards or Yubikey devices
if conn_password and pkcs11_provider:
self._add_args(b_command,
(b"-o", b"KbdInteractiveAuthentication=no",
b"-o", b"PreferredAuthentications=publickey",
b"-o", b"PasswordAuthentication=no",
b'-o', to_bytes(u'PKCS11Provider=%s' % pkcs11_provider)),
u'Enable pkcs11')
# sftp batch mode allows us to correctly catch failed transfers, but can
# be disabled if the client side doesn't support the option. However,
# sftp batch mode does not prompt for passwords so it must be disabled
# if not using controlpersist and using sshpass
if subsystem == 'sftp' and self.get_option('sftp_batch_mode'):
if conn_password:
b_args = [b'-o', b'BatchMode=no']
self._add_args(b_command, b_args, u'disable batch mode for sshpass')
b_command += [b'-b', b'-']
if self._play_context.verbosity > 3:
b_command.append(b'-vvv')
# Next, we add ssh_args
ssh_args = self.get_option('ssh_args')
if ssh_args:
b_args = [to_bytes(a, errors='surrogate_or_strict') for a in
self._split_ssh_args(ssh_args)]
self._add_args(b_command, b_args, u"ansible.cfg set ssh_args")
# Now we add various arguments that have their own specific settings defined in docs above.
if self.get_option('host_key_checking') is False:
b_args = (b"-o", b"StrictHostKeyChecking=no")
self._add_args(b_command, b_args, u"ANSIBLE_HOST_KEY_CHECKING/host_key_checking disabled")
self.port = self.get_option('port')
if self.port is not None:
b_args = (b"-o", b"Port=" + to_bytes(self.port, nonstring='simplerepr', errors='surrogate_or_strict'))
self._add_args(b_command, b_args, u"ANSIBLE_REMOTE_PORT/remote_port/ansible_port set")
key = self.get_option('private_key_file')
if key:
b_args = (b"-o", b'IdentityFile="' + to_bytes(os.path.expanduser(key), errors='surrogate_or_strict') + b'"')
self._add_args(b_command, b_args, u"ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set")
if not conn_password:
self._add_args(
b_command, (
b"-o", b"KbdInteractiveAuthentication=no",
b"-o", b"PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
b"-o", b"PasswordAuthentication=no"
),
u"ansible_password/ansible_ssh_password not set"
)
self.user = self.get_option('remote_user')
if self.user:
self._add_args(
b_command,
(b"-o", b'User="%s"' % to_bytes(self.user, errors='surrogate_or_strict')),
u"ANSIBLE_REMOTE_USER/remote_user/ansible_user/user/-u set"
)
timeout = self.get_option('timeout')
self._add_args(
b_command,
(b"-o", b"ConnectTimeout=" + to_bytes(timeout, errors='surrogate_or_strict', nonstring='simplerepr')),
u"ANSIBLE_TIMEOUT/timeout set"
)
# Add in any common or binary-specific arguments from the PlayContext
# (i.e. inventory or task settings or overrides on the command line).
for opt in (u'ssh_common_args', u'{0}_extra_args'.format(subsystem)):
attr = self.get_option(opt)
if attr is not None:
b_args = [to_bytes(a, errors='surrogate_or_strict') for a in self._split_ssh_args(attr)]
self._add_args(b_command, b_args, u"Set %s" % opt)
# Check if ControlPersist is enabled and add a ControlPath if one hasn't
# already been set.
controlpersist, controlpath = self._persistence_controls(b_command)
if controlpersist:
self._persistent = True
if not controlpath:
self.control_path_dir = self.get_option('control_path_dir')
cpdir = unfrackpath(self.control_path_dir)
b_cpdir = to_bytes(cpdir, errors='surrogate_or_strict')
# The directory must exist and be writable.
makedirs_safe(b_cpdir, 0o700)
if not os.access(b_cpdir, os.W_OK):
raise AnsibleError("Cannot write to ControlPath %s" % to_native(cpdir))
self.control_path = self.get_option('control_path')
if not self.control_path:
self.control_path = self._create_control_path(
self.host,
self.port,
self.user
)
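# Note: OpenSSH re-tokenizes each "-o" value with its own config parser, so
# a ControlPath containing spaces must be quoted inside the option string
# (see ansible/ansible#59206).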
b_args = (b"-o", b"ControlPath=" + to_bytes(self.control_path % dict(directory=cpdir), errors='surrogate_or_strict'))
self._add_args(b_command, b_args, u"found only ControlPersist; added ControlPath")
# Finally, we add any caller-supplied extras.
if other_args:
b_command += [to_bytes(a) for a in other_args]
return b_command
def _send_initial_data(self, fh, in_data, ssh_process):
'''
Writes initial data to the stdin filehandle of the subprocess and closes
it. (The handle must be closed; otherwise, for example, "sftp -b -" will
just hang forever waiting for more commands.)
'''
display.debug(u'Sending initial data')
try:
fh.write(to_bytes(in_data))
fh.close()
except (OSError, IOError) as e:
# The ssh connection may have already terminated at this point, with a more useful error
# Only raise AnsibleConnectionFailure if the ssh process is still alive
time.sleep(0.001)
ssh_process.poll()
if getattr(ssh_process, 'returncode', None) is None:
raise AnsibleConnectionFailure(
'Data could not be sent to remote host "%s". Make sure this host can be reached '
'over ssh: %s' % (self.host, to_native(e)), orig_exc=e
)
display.debug(u'Sent initial data (%d bytes)' % len(in_data))
# Used by _run() to kill processes on failures
@staticmethod
def _terminate_process(p):
""" Terminate a process, ignoring errors """
try:
p.terminate()
except (OSError, IOError):
pass
# This is separate from _run() because we need to do the same thing for stdout
# and stderr.
def _examine_output(self, source, state, b_chunk, sudoable):
'''
Takes a string, extracts complete lines from it, tests to see if they
are a prompt, error message, etc., and sets appropriate flags in self.
Prompt and success lines are removed.
Returns the processed (i.e. possibly-edited) output and the unprocessed
remainder (to be processed with the next chunk) as strings.
'''
output = []
for b_line in b_chunk.splitlines(True):
display_line = to_text(b_line).rstrip('\r\n')
suppress_output = False
# display.debug("Examining line (source=%s, state=%s): '%s'" % (source, state, display_line))
if self.become.expect_prompt() and self.become.check_password_prompt(b_line):
display.debug(u"become_prompt: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_prompt'] = True
suppress_output = True
elif self.become.success and self.become.check_success(b_line):
display.debug(u"become_success: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_success'] = True
suppress_output = True
elif sudoable and self.become.check_incorrect_password(b_line):
display.debug(u"become_error: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_error'] = True
elif sudoable and self.become.check_missing_password(b_line):
display.debug(u"become_nopasswd_error: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_nopasswd_error'] = True
if not suppress_output:
output.append(b_line)
# The chunk we read was most likely a series of complete lines, but just
# in case the last line was incomplete (and not a prompt, which we would
# have removed from the output), we retain it to be processed with the
# next chunk.
remainder = b''
if output and not output[-1].endswith(b'\n'):
remainder = output[-1]
output = output[:-1]
return b''.join(output), remainder
def _bare_run(self, cmd, in_data, sudoable=True, checkrc=True):
'''
Starts the command and communicates with it until it ends.
'''
# We don't use _shell.quote as this is run on the controller and independent from the shell plugin chosen
display_cmd = u' '.join(shlex.quote(to_text(c)) for c in cmd)
display.vvv(u'SSH: EXEC {0}'.format(display_cmd), host=self.host)
# Start the given command. If we don't need to pipeline data, we can try
# to use a pseudo-tty (ssh will have been invoked with -tt). If we are
# pipelining data, or can't create a pty, we fall back to using plain
# old pipes.
p = None
if isinstance(cmd, (text_type, binary_type)):
cmd = to_bytes(cmd)
else:
cmd = list(map(to_bytes, cmd))
conn_password = self.get_option('password') or self._play_context.password
if not in_data:
try:
# Make sure stdin is a proper pty to avoid tcgetattr errors
master, slave = pty.openpty()
if PY3 and conn_password:
# pylint: disable=unexpected-keyword-arg
p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe)
else:
p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdin = os.fdopen(master, 'wb', 0)
os.close(slave)
except (OSError, IOError):
p = None
if not p:
try:
if PY3 and conn_password:
# pylint: disable=unexpected-keyword-arg
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe)
else:
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdin = p.stdin
except (OSError, IOError) as e:
raise AnsibleError('Unable to execute ssh command line on a controller due to: %s' % to_native(e))
# If we are using SSH password authentication, write the password into
# the pipe we opened in _build_command.
if conn_password:
os.close(self.sshpass_pipe[0])
try:
os.write(self.sshpass_pipe[1], to_bytes(conn_password) + b'\n')
except OSError as e:
# Ignore broken pipe errors if the sshpass process has exited.
if e.errno != errno.EPIPE or p.poll() is None:
raise
os.close(self.sshpass_pipe[1])
#
# SSH state machine
#
# Now we read and accumulate output from the running process until it
# exits. Depending on the circumstances, we may also need to write an
# escalation password and/or pipelined input to the process.
states = [
'awaiting_prompt', 'awaiting_escalation', 'ready_to_send', 'awaiting_exit'
]
# Are we requesting privilege escalation? Right now, we may be invoked
# to execute sftp/scp with sudoable=True, but we can request escalation
# only when using ssh. Otherwise we can send initial data straightaway.
state = states.index('ready_to_send')
if to_bytes(self.get_option('ssh_executable')) in cmd and sudoable:
prompt = getattr(self.become, 'prompt', None)
if prompt:
# We're requesting escalation with a password, so we have to
# wait for a password prompt.
state = states.index('awaiting_prompt')
display.debug(u'Initial state: %s: %s' % (states[state], to_text(prompt)))
elif self.become and self.become.success:
# We're requesting escalation without a password, so we have to
# detect success/failure before sending any initial data.
state = states.index('awaiting_escalation')
display.debug(u'Initial state: %s: %s' % (states[state], to_text(self.become.success)))
# We store accumulated stdout and stderr output from the process here,
# but strip any privilege escalation prompt/confirmation lines first.
# Output is accumulated into tmp_*, complete lines are extracted into
# an array, then checked and removed or copied to stdout or stderr. We
# set any flags based on examining the output in self._flags.
b_stdout = b_stderr = b''
b_tmp_stdout = b_tmp_stderr = b''
self._flags = dict(
become_prompt=False, become_success=False,
become_error=False, become_nopasswd_error=False
)
# select timeout should be longer than the connect timeout, otherwise
# they will race each other when we can't connect, and the connect
# timeout usually fails
timeout = 2 + self.get_option('timeout')
for fd in (p.stdout, p.stderr):
fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
# TODO: bcoca would like to use SelectSelector() when open
# select is faster when the number of filehandles is low and we only ever handle 1.
selector = selectors.DefaultSelector()
selector.register(p.stdout, selectors.EVENT_READ)
selector.register(p.stderr, selectors.EVENT_READ)
# If we can send initial data without waiting for anything, we do so
# before we start polling
if states[state] == 'ready_to_send' and in_data:
self._send_initial_data(stdin, in_data, p)
state += 1
try:
while True:
poll = p.poll()
events = selector.select(timeout)
# We pay attention to timeouts only while negotiating a prompt.
if not events:
# We timed out
if state <= states.index('awaiting_escalation'):
# If the process has already exited, then it's not really a
# timeout; we'll let the normal error handling deal with it.
if poll is not None:
break
self._terminate_process(p)
raise AnsibleError('Timeout (%ds) waiting for privilege escalation prompt: %s' % (timeout, to_native(b_stdout)))
# Read whatever output is available on stdout and stderr, and stop
# listening to the pipe if it's been closed.
for key, event in events:
if key.fileobj == p.stdout:
b_chunk = p.stdout.read()
if b_chunk == b'':
# stdout has been closed, stop watching it
selector.unregister(p.stdout)
# When ssh has ControlMaster (+ControlPath/Persist) enabled, the
# first connection goes into the background and we never see EOF
# on stderr. If we see EOF on stdout, lower the select timeout
# to reduce the time wasted selecting on stderr if we observe
that the process has not yet exited after this EOF. Otherwise
# we may spend a long timeout period waiting for an EOF that is
# not going to arrive until the persisted connection closes.
timeout = 1
b_tmp_stdout += b_chunk
display.debug(u"stdout chunk (state=%s):\n>>>%s<<<\n" % (state, to_text(b_chunk)))
elif key.fileobj == p.stderr:
b_chunk = p.stderr.read()
if b_chunk == b'':
# stderr has been closed, stop watching it
selector.unregister(p.stderr)
b_tmp_stderr += b_chunk
display.debug("stderr chunk (state=%s):\n>>>%s<<<\n" % (state, to_text(b_chunk)))
# We examine the output line-by-line until we have negotiated any
# privilege escalation prompt and subsequent success/error message.
# Afterwards, we can accumulate output without looking at it.
if state < states.index('ready_to_send'):
if b_tmp_stdout:
b_output, b_unprocessed = self._examine_output('stdout', states[state], b_tmp_stdout, sudoable)
b_stdout += b_output
b_tmp_stdout = b_unprocessed
if b_tmp_stderr:
b_output, b_unprocessed = self._examine_output('stderr', states[state], b_tmp_stderr, sudoable)
b_stderr += b_output
b_tmp_stderr = b_unprocessed
else:
b_stdout += b_tmp_stdout
b_stderr += b_tmp_stderr
b_tmp_stdout = b_tmp_stderr = b''
# If we see a privilege escalation prompt, we send the password.
# (If we're expecting a prompt but the escalation succeeds, we
# didn't need the password and can carry on regardless.)
if states[state] == 'awaiting_prompt':
if self._flags['become_prompt']:
display.debug(u'Sending become_password in response to prompt')
become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
# On python3 stdin is a BufferedWriter, and we don't have a guarantee
# that the write will happen without a flush
stdin.flush()
self._flags['become_prompt'] = False
state += 1
elif self._flags['become_success']:
state += 1
# We've requested escalation (with or without a password), now we
# wait for an error message or a successful escalation.
if states[state] == 'awaiting_escalation':
if self._flags['become_success']:
display.vvv(u'Escalation succeeded')
self._flags['become_success'] = False
state += 1
elif self._flags['become_error']:
display.vvv(u'Escalation failed')
self._terminate_process(p)
self._flags['become_error'] = False
raise AnsibleError('Incorrect %s password' % self.become.name)
elif self._flags['become_nopasswd_error']:
display.vvv(u'Escalation requires password')
self._terminate_process(p)
self._flags['become_nopasswd_error'] = False
raise AnsibleError('Missing %s password' % self.become.name)
elif self._flags['become_prompt']:
# This shouldn't happen, because we should see the "Sorry,
# try again" message first.
display.vvv(u'Escalation prompt repeated')
self._terminate_process(p)
self._flags['become_prompt'] = False
raise AnsibleError('Incorrect %s password' % self.become.name)
# Once we're sure that the privilege escalation prompt, if any, has
# been dealt with, we can send any initial data and start waiting
# for output.
if states[state] == 'ready_to_send':
if in_data:
self._send_initial_data(stdin, in_data, p)
state += 1
# Now we're awaiting_exit: has the child process exited? If it has,
# and we've read all available output from it, we're done.
if poll is not None:
if not selector.get_map() or not events:
break
# We should not see further writes to the stdout/stderr file
# descriptors after the process has closed, set the select
# timeout to gather any last writes we may have missed.
timeout = 0
continue
# If the process has not yet exited, but we've already read EOF from
# its stdout and stderr (and thus no longer watching any file
# descriptors), we can just wait for it to exit.
elif not selector.get_map():
p.wait()
break
# Otherwise there may still be outstanding data to read.
finally:
selector.close()
# close stdin, stdout, and stderr after process is terminated and
# stdout/stderr are read completely (see also issues #848, #64768).
stdin.close()
p.stdout.close()
p.stderr.close()
if self.get_option('host_key_checking'):
if cmd[0] == b"sshpass" and p.returncode == 6:
raise AnsibleError('Using an SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support '
'this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
controlpersisterror = b'Bad configuration option: ControlPersist' in b_stderr or b'unknown configuration option: ControlPersist' in b_stderr
if p.returncode != 0 and controlpersisterror:
raise AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" '
'(or ssh_args in [ssh_connection] section of the config file) before running again')
# If we find a broken pipe because of ControlPersist timeout expiring (see #16731),
# we raise a special exception so that we can retry a connection.
controlpersist_broken_pipe = b'mux_client_hello_exchange: write packet: Broken pipe' in b_stderr
if p.returncode == 255:
additional = to_native(b_stderr)
if controlpersist_broken_pipe:
raise AnsibleControlPersistBrokenPipeError('Data could not be sent because of ControlPersist broken pipe: %s' % additional)
elif in_data and checkrc:
raise AnsibleConnectionFailure('Data could not be sent to remote host "%s". Make sure this host can be reached over ssh: %s'
% (self.host, additional))
return (p.returncode, b_stdout, b_stderr)
@_ssh_retry
def _run(self, cmd, in_data, sudoable=True, checkrc=True):
"""Wrapper around _bare_run that retries the connection
"""
return self._bare_run(cmd, in_data, sudoable=sudoable, checkrc=checkrc)
@_ssh_retry
def _file_transport_command(self, in_path, out_path, sftp_action):
# scp and sftp require square brackets for IPv6 addresses, but
# accept them for hostnames and IPv4 addresses too.
host = '[%s]' % self.host
smart_methods = ['sftp', 'scp', 'piped']
# Windows does not support dd so we cannot use the piped method
if getattr(self._shell, "_IS_WINDOWS", False):
smart_methods.remove('piped')
# Transfer methods to try
methods = []
# Use the transfer_method option if set, otherwise use scp_if_ssh
ssh_transfer_method = self.get_option('ssh_transfer_method')
scp_if_ssh = self.get_option('scp_if_ssh')
if ssh_transfer_method is None and scp_if_ssh == 'smart':
ssh_transfer_method = 'smart'
if ssh_transfer_method is not None:
if ssh_transfer_method == 'smart':
methods = smart_methods
else:
methods = [ssh_transfer_method]
else:
# since this can be a non-bool now, we need to handle it correctly
if not isinstance(scp_if_ssh, bool):
scp_if_ssh = scp_if_ssh.lower()
if scp_if_ssh in BOOLEANS:
scp_if_ssh = boolean(scp_if_ssh, strict=False)
elif scp_if_ssh != 'smart':
raise AnsibleOptionsError('scp_if_ssh needs to be one of [smart|True|False]')
if scp_if_ssh == 'smart':
methods = smart_methods
elif scp_if_ssh is True:
methods = ['scp']
else:
methods = ['sftp']
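# `methods` is now ordered by preference; each entry is attempted in turn
# below, rolling over to the next one when a transfer fails.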
for method in methods:
returncode = stdout = stderr = None
if method == 'sftp':
cmd = self._build_command(self.get_option('sftp_executable'), 'sftp', to_bytes(host))
in_data = u"{0} {1} {2}\n".format(sftp_action, shlex.quote(in_path), shlex.quote(out_path))
in_data = to_bytes(in_data, nonstring='passthru')
(returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
elif method == 'scp':
scp = self.get_option('scp_executable')
if sftp_action == 'get':
cmd = self._build_command(scp, 'scp', u'{0}:{1}'.format(host, self._shell.quote(in_path)), out_path)
else:
cmd = self._build_command(scp, 'scp', in_path, u'{0}:{1}'.format(host, self._shell.quote(out_path)))
in_data = None
(returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
elif method == 'piped':
if sftp_action == 'get':
# we pass sudoable=False to disable pty allocation, which
# would end up mixing stdout/stderr and screwing with newlines
(returncode, stdout, stderr) = self.exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), sudoable=False)
with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
out_file.write(stdout)
else:
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as f:
in_data = to_bytes(f.read(), nonstring='passthru')
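# with no data to send, tell dd to copy zero blocks so it simply
# creates/truncates the target file without reading from stdin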
if not in_data:
count = ' count=0'
else:
count = ''
(returncode, stdout, stderr) = self.exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), in_data=in_data, sudoable=False)
# Check the return code and rollover to next method if failed
if returncode == 0:
return (returncode, stdout, stderr)
else:
# If not in smart mode, the data will be printed by the raise below
if len(methods) > 1:
display.warning(u'%s transfer mechanism failed on %s. Use ANSIBLE_DEBUG=1 to see detailed information' % (method, host))
display.debug(u'%s' % to_text(stdout))
display.debug(u'%s' % to_text(stderr))
if returncode == 255:
raise AnsibleConnectionFailure("Failed to connect to the host via %s: %s" % (method, to_native(stderr)))
else:
raise AnsibleError("failed to transfer file to %s %s:\n%s\n%s" %
(to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
def _escape_win_path(self, path):
""" converts a Windows path to one that's supported by SFTP and SCP """
# If using a root path then we need to start with /
prefix = ""
if re.match(r'^\w{1}:', path):
prefix = "/"
# Convert all '\' to '/'
return "%s%s" % (prefix, path.replace("\\", "/"))
#
# Main public methods
#
def exec_command(self, cmd, in_data=None, sudoable=True):
''' run a command on the remote host '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
display.vvv(u"ESTABLISH SSH CONNECTION FOR USER: {0}".format(self.user), host=self.host)
if getattr(self._shell, "_IS_WINDOWS", False):
# Become method 'runas' is done in the wrapper that is executed,
# need to disable sudoable so the bare_run is not waiting for a
# prompt that will not occur
sudoable = False
# Make sure our first command is to set the console encoding to
# utf-8, this must be done via chcp to get utf-8 (65001)
cmd_parts = ["chcp.com", "65001", self._shell._SHELL_REDIRECT_ALLNULL, self._shell._SHELL_AND]
cmd_parts.extend(self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False))
cmd = ' '.join(cmd_parts)
# we can only use tty when we are not pipelining the modules. piping
# data into /usr/bin/python inside a tty automatically invokes the
# python interactive-mode but the modules are not compatible with the
# interactive-mode ("unexpected indent" mainly because of empty lines)
ssh_executable = self.get_option('ssh_executable')
# -tt can cause various issues in some environments so allow the user
# to disable it as a troubleshooting method.
use_tty = self.get_option('use_tty')
if not in_data and sudoable and use_tty:
args = ('-tt', self.host, cmd)
else:
args = (self.host, cmd)
cmd = self._build_command(ssh_executable, 'ssh', *args)
(returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable)
# When running on Windows, stderr may contain CLIXML encoded output
if getattr(self._shell, "_IS_WINDOWS", False) and stderr.startswith(b"#< CLIXML"):
stderr = _parse_clixml(stderr)
return (returncode, stdout, stderr)
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
super(Connection, self).put_file(in_path, out_path)
display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.host)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_native(in_path)))
if getattr(self._shell, "_IS_WINDOWS", False):
out_path = self._escape_win_path(out_path)
return self._file_transport_command(in_path, out_path, 'put')
def fetch_file(self, in_path, out_path):
''' fetch a file from remote to local '''
super(Connection, self).fetch_file(in_path, out_path)
display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
# need to add / if path is rooted
if getattr(self._shell, "_IS_WINDOWS", False):
in_path = self._escape_win_path(in_path)
return self._file_transport_command(in_path, out_path, 'get')
def reset(self):
run_reset = False
# If we have a persistent ssh connection (ControlPersist), we can ask it to stop listening.
# only run the reset if the ControlPath already exists or if it isn't configured and ControlPersist is set
# 'check' will determine this.
cmd = self._build_command(self.get_option('ssh_executable'), 'ssh', '-O', 'check', self.host)
display.vvv(u'sending connection check: %s' % to_text(cmd))
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
status_code = p.wait()
if status_code != 0:
display.vvv(u"No connection to reset: %s" % to_text(stderr))
else:
run_reset = True
if run_reset:
cmd = self._build_command(self.get_option('ssh_executable'), 'ssh', '-O', 'stop', self.host)
display.vvv(u'sending connection stop: %s' % to_text(cmd))
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
status_code = p.wait()
if status_code != 0:
display.warning(u"Failed to reset connection:%s" % to_text(stderr))
self.close()
def close(self):
self._connected = False
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 59,206 |
Ansible generates a bad SSH commandline when there's a space in the 'control_path'
|
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and devel branch are affected too -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
When, for any reason, the .ansible/ directory sits inside a path **containing whitespace**, the 'control_path' generated by default is not quoted, and the resulting SSH command line is therefore invalid.
Adding quotes around the 'control_path' inside 'ansible/plugins/connection/ssh.py' fixes the bug.
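A minimal sketch of the word-splitting involved (the path is the one from this report; `shlex` is used here only to illustrate shell-style parsing, not ssh's internal option handling):
```python
import shlex

control_path = "/Volumes/Apple HDD/VDrive/Dropbox/Mackup/.ansible/cp/fc95ded756"

# Unquoted, the space splits the option value into two words when re-parsed:
print(shlex.split("-o ControlPath=%s" % control_path))
# ['-o', 'ControlPath=/Volumes/Apple', 'HDD/VDrive/Dropbox/Mackup/.ansible/cp/fc95ded756']

# Quoted, it stays a single argument:
print(shlex.split("-o ControlPath=%s" % shlex.quote(control_path)))
# ['-o', 'ControlPath=/Volumes/Apple HDD/VDrive/Dropbox/Mackup/.ansible/cp/fc95ded756']
```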
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
Connection plugin
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes -->
```
ansible 2.8.2
config file = /Users/pposca/proyectos/kipeos-devops/ansible.cfg
configured module search path = ['/Users/pposca/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/pposca/.virtualenvs/kipeos-devops/lib/python3.7/site-packages/ansible
executable location = /Users/pposca/.virtualenvs/kipeos-devops/bin/ansible
python version = 3.7.3 (default, Jul 7 2019, 18:15:21) [Clang 10.0.1 (clang-1001.0.46.4)]
```
##### CONFIGURATION
<!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes -->
```
DEFAULT_HOST_LIST(/Users/pposca/proyectos/kipeos-devops/ansible.cfg) = ['/Users/pposca/proyectos/kipeos-devops/hosts']
INTERPRETER_PYTHON(/Users/pposca/proyectos/kipeos-devops/ansible.cfg) = auto
```
##### OS / ENVIRONMENT
- MacOS 10.14.5
- Mackup (so .ansible/ is a symlink to a directory with whitespaces in its path)
- Python 3.7.3
##### STEPS TO REPRODUCE
Having .ansible inside a directory with whitespaces in its path:
```
ansible all -m ping
```
##### EXPECTED RESULTS
```
mydomain.com | SUCCESS => {
"ansible_facts": {
"discovered_interpreter_python": "/usr/bin/python3"
},
"changed": false,
"ping": "pong"
}
```
##### ACTUAL RESULTS
```
ansible 2.8.2
config file = /Users/pposca/proyectos/kipeos-devops/ansible.cfg
configured module search path = ['/Users/pposca/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/pposca/.virtualenvs/kipeos-devops/lib/python3.7/site-packages/ansible
executable location = /Users/pposca/.virtualenvs/kipeos-devops/bin/ansible
python version = 3.7.3 (default, Jul 7 2019, 18:15:21) [Clang 10.0.1 (clang-1001.0.46.4)]
Using /Users/pposca/proyectos/kipeos-devops/ansible.cfg as config file
setting up inventory plugins
host_list declined parsing /Users/pposca/proyectos/kipeos-devops/hosts as it did not pass it's verify_file() method
script declined parsing /Users/pposca/proyectos/kipeos-devops/hosts as it did not pass it's verify_file() method
auto declined parsing /Users/pposca/proyectos/kipeos-devops/hosts as it did not pass it's verify_file() method
Parsed /Users/pposca/proyectos/kipeos-devops/hosts inventory source with ini plugin
Loading callback plugin minimal of type stdout, v2.0 from /Users/pposca/.virtualenvs/kipeos-devops/lib/python3.7/site-packages/ansible/plugins/callback/minimal.py
META: ran handlers
<mydomain.com> ESTABLISH SSH CONNECTION FOR USER: None
<mydomain.com> SSH: EXEC ssh -vvv -C -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o 'ControlPath=/Volumes/Apple HDD/VDrive/Dropbox/Mackup/.ansible/cp/fc95ded756' mydomain.com '/bin/sh -c '"'"'echo ~ && sleep 0'"'"''
<mydomain.com> (255, b'', b'command-line line 0: garbage at end of line; "HDD/VDrive/Dropbox/Mackup/.ansible/cp/fc95ded756".\r\n')
mydomain.com | UNREACHABLE! => {
"changed": false,
"msg": "Failed to connect to the host via ssh: command-line line 0: garbage at end of line; \"HDD/VDrive/Dropbox/Mackup/.ansible/cp/fc95ded756\".",
"unreachable": true
}
```
##### VALUE OF THE VARIABLE 'b_args'
Note the whitespace after 'Apple':
```
(b'-o', b'ControlPath=/Volumes/Apple HDD/VDrive/Dropbox/Mackup/.ansible/cp/fc95ded756')
```
##### RELATED ISSUES
#7713 #31916
|
https://github.com/ansible/ansible/issues/59206
|
https://github.com/ansible/ansible/pull/76424
|
0ef5274a3c6189e8fa6a7d97993c165ab548fe95
|
aa022dba2d141cbd3b862767400ba4f714a9edd1
| 2019-07-17T22:03:30Z |
python
| 2022-01-10T17:29:35Z |
test/integration/targets/connection_ssh/runme.sh
|
#!/usr/bin/env bash
set -ux
# We skip this whole section if the test node doesn't have sshpass on it.
if command -v sshpass > /dev/null; then
# Check if our sshpass supports -P
sshpass -P foo > /dev/null
sshpass_supports_prompt=$?
if [[ $sshpass_supports_prompt -eq 0 ]]; then
# If the prompt is wrong, we'll end up hanging (due to sshpass hanging).
# We should probably do something better here, like timing out in Ansible,
# but this has been the behavior for a long time, before we supported custom
# password prompts.
#
# So we search for a custom password prompt that is clearly wrong and call
# ansible with timeout. If we time out, our custom prompt was successfully
# searched for. It's a weird way of doing things, but it does ensure
# that the flag gets passed to sshpass.
timeout 5 ansible -m ping \
-e ansible_connection=ssh \
-e ansible_sshpass_prompt=notThis: \
-e ansible_password=foo \
-e ansible_user=definitelynotroot \
-i test_connection.inventory \
ssh-pipelining
ret=$?
# 124 is EXIT_TIMEDOUT from gnu coreutils
# 143 is 128+SIGTERM(15) from BusyBox
if [[ $ret -ne 124 && $ret -ne 143 ]]; then
echo "Expected to time out and we did not. Exiting with failure."
exit 1
fi
else
ansible -m ping \
-e ansible_connection=ssh \
-e ansible_sshpass_prompt=notThis: \
-e ansible_password=foo \
-e ansible_user=definitelynotroot \
-i test_connection.inventory \
ssh-pipelining | grep 'customized password prompts'
ret=$?
[[ $ret -eq 0 ]] || exit $ret
fi
fi
set -e
# temporary work-around for issues due to new scp filename checking
# https://github.com/ansible/ansible/issues/52640
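# (scp's -T flag turns off the client-side strict filename checking that newer OpenSSH versions introduced)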
if [[ "$(scp -T 2>&1)" == "usage: scp "* ]]; then
# scp supports the -T option
# work-around required
scp_args=("-e" "ansible_scp_extra_args=-T")
else
# scp does not support the -T option
# no work-around required
# however we need to put something in the array to keep older versions of bash happy
scp_args=("-e" "")
fi
# sftp
./posix.sh "$@"
# scp
ANSIBLE_SCP_IF_SSH=true ./posix.sh "$@" "${scp_args[@]}"
# piped
ANSIBLE_SSH_TRANSFER_METHOD=piped ./posix.sh "$@"
# test config defaults override
ansible-playbook check_ssh_defaults.yml "$@" -i test_connection.inventory
# ensure we can load from ini cfg
ANSIBLE_CONFIG=./test_ssh_defaults.cfg ansible-playbook verify_config.yml "$@"
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 72,263 |
fact_path in ansible.cfg has no effect on ad-hoc setup or setup invoked as task
|
##### SUMMARY
When setting `fact_path` in `ansible.cfg`, the configured path is used only for `gather_facts`; it has no effect when using `setup` as a task or when running `ansible -m setup` ad-hoc. This behavior is in fact [documented](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/setup_module.html#):
> The default fact_path can be specified in ansible.cfg for when setup is automatically called as part of gather_facts
This is particularly painful when the controller is running, say, FreeBSD, as they package Ansible with a default of `/usr/local/etc/ansible/facts.d` which differs greatly from Ansible's typical default of `/etc/ansible/facts.d`. Note that I do not think this is a FreeBSD packaging issue.
##### ISSUE TYPE
- Feature Request
##### COMPONENT NAME
setup
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes -->
```
ansible 2.10.2
config file = /Users/jpm/.ansible.cfg
configured module search path = ['/etc/ansible/library']
ansible python module location = /private/tmp/an/env.v3/lib/python3.8/site-packages/ansible
executable location = /private/tmp/an/env.v3/bin/ansible
python version = 3.8.5 (default, Sep 27 2020, 11:38:54) [Clang 12.0.0 (clang-1200.0.32.2)]
```
##### CONFIGURATION
<!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes -->
```
ANSIBLE_NOCOWS(/Users/jpm/.ansible.cfg) = True
ANSIBLE_SSH_ARGS(/Users/jpm/.ansible.cfg) = -o PasswordAuthentication=no -o ControlMaster=auto -o ControlPersist=60s -o ControlPath=/tmp/ansible-ssh-%h-%p-%r -o PreferredAuthentications=publickey,gssapi-with-mic,gssapi-keyex,hostbased
DEFAULT_ACTION_PLUGIN_PATH(/Users/jpm/.ansible.cfg) = ['/etc/ansible/jp.action_plugins']
DEFAULT_FACT_PATH(/Users/jpm/.ansible.cfg) = /tmp/jp/facts.d
DEFAULT_FILTER_PLUGIN_PATH(/Users/jpm/.ansible.cfg) = ['/etc/ansible/jp.filter_plugins']
DEFAULT_HOST_LIST(/Users/jpm/.ansible.cfg) = ['/etc/ansible/hosts']
DEFAULT_LOAD_CALLBACK_PLUGINS(/Users/jpm/.ansible.cfg) = True
DEFAULT_MANAGED_STR(/Users/jpm/.ansible.cfg) = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
DEFAULT_MODULE_PATH(/Users/jpm/.ansible.cfg) = ['/etc/ansible/library']
RETRY_FILES_ENABLED(/Users/jpm/.ansible.cfg) = False
```
##### OS / ENVIRONMENT
<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->
Linux, FreeBSD, MacOSX
##### STEPS TO REPRODUCE
1. Create a new directory (`/tmp/jp/facts.d`) and a fact file within
2. Run playbook
3. Run ad-hoc `setup`
<!--- Paste example playbooks or commands between quotes below -->
```yaml
- hosts: 127.0.0.1
connection: local
gather_facts: true
tasks:
- debug: msg='{{ ansible_local.hw.name }}'
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
I expect the ad-hoc `setup` to produce the same values as the setup invocation via `gather_facts`
##### ACTUAL RESULTS
<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
```console
$ cat /tmp/jp/facts.d/hw.fact
{ "name" : "Jane Jolie" }
$ ansible-playbook test.yml
PLAY [127.0.0.1] ********************************************************************
TASK [Gathering Facts] **************************************************************
ok: [localhost]
TASK [debug] ************************************************************************
ok: [localhost] => {
"msg": "Jane Jolie"
}
```
This does not work as expected:
```console
$ ansible 127.0.0.1 -c local -m setup -a filter=ansible_local
localhost | SUCCESS => {
"ansible_facts": {
"ansible_local": {}
},
"changed": false
}
```
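A possible workaround (assuming per-task module arguments are acceptable): the `setup` module itself accepts `fact_path` as an option, so the path can be passed explicitly, e.g. `ansible 127.0.0.1 -c local -m setup -a 'fact_path=/tmp/jp/facts.d filter=ansible_local'`.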
|
https://github.com/ansible/ansible/issues/72263
|
https://github.com/ansible/ansible/pull/76053
|
c53e6d94e987c27f60bed55a2dc8001ffcd10cd2
|
0b6d3312dd4dcc7a53b9027e07723cd5752a65ea
| 2020-10-20T12:04:47Z |
python
| 2022-01-10T22:25:00Z |
changelogs/fragments/deprecate_default_fact_path_setting.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 72,263 |
fact_path in ansible.cfg has no effect on ad-hoc setup or setup invoked as task
|
##### SUMMARY
When setting `fact_path` in `ansible.cfg`, the configured path is used only for `gather_facts`; it has no effect when using `setup` as a task or when running `ansible -m setup` ad-hoc. This behavior is in fact [documented](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/setup_module.html#):
> The default fact_path can be specified in ansible.cfg for when setup is automatically called as part of gather_facts
This is particularly painful when the controller is running, say, FreeBSD, as they package Ansible with a default of `/usr/local/etc/ansible/facts.d` which differs greatly from Ansible's typical default of `/etc/ansible/facts.d`. Note that I do not think this is a FreeBSD packaging issue.
##### ISSUE TYPE
- Feature Request
##### COMPONENT NAME
setup
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes -->
```
ansible 2.10.2
config file = /Users/jpm/.ansible.cfg
configured module search path = ['/etc/ansible/library']
ansible python module location = /private/tmp/an/env.v3/lib/python3.8/site-packages/ansible
executable location = /private/tmp/an/env.v3/bin/ansible
python version = 3.8.5 (default, Sep 27 2020, 11:38:54) [Clang 12.0.0 (clang-1200.0.32.2)]
```
##### CONFIGURATION
<!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes -->
```
ANSIBLE_NOCOWS(/Users/jpm/.ansible.cfg) = True
ANSIBLE_SSH_ARGS(/Users/jpm/.ansible.cfg) = -o PasswordAuthentication=no -o ControlMaster=auto -o ControlPersist=60s -o ControlPath=/tmp/ansible-ssh-%h-%p-%r -o PreferredAuthentications=publickey,gssapi-with-mic,gssapi-keyex,hostbased
DEFAULT_ACTION_PLUGIN_PATH(/Users/jpm/.ansible.cfg) = ['/etc/ansible/jp.action_plugins']
DEFAULT_FACT_PATH(/Users/jpm/.ansible.cfg) = /tmp/jp/facts.d
DEFAULT_FILTER_PLUGIN_PATH(/Users/jpm/.ansible.cfg) = ['/etc/ansible/jp.filter_plugins']
DEFAULT_HOST_LIST(/Users/jpm/.ansible.cfg) = ['/etc/ansible/hosts']
DEFAULT_LOAD_CALLBACK_PLUGINS(/Users/jpm/.ansible.cfg) = True
DEFAULT_MANAGED_STR(/Users/jpm/.ansible.cfg) = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
DEFAULT_MODULE_PATH(/Users/jpm/.ansible.cfg) = ['/etc/ansible/library']
RETRY_FILES_ENABLED(/Users/jpm/.ansible.cfg) = False
```
##### OS / ENVIRONMENT
<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->
Linux, FreeBSD, MacOSX
##### STEPS TO REPRODUCE
1. Create a new directory (`/tmp/jp/facts.d`) and a fact file within
2. Run playbook
3. Run ad-hoc `setup`
<!--- Paste example playbooks or commands between quotes below -->
```yaml
- hosts: 127.0.0.1
connection: local
gather_facts: true
tasks:
- debug: msg='{{ ansible_local.hw.name }}'
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
I expect the ad-hoc `setup` to produce the same values as the setup invocation via `gather_facts`
##### ACTUAL RESULTS
<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
```console
$ cat /tmp/jp/facts.d/hw.fact
{ "name" : "Jane Jolie" }
$ ansible-playbook test.yml
PLAY [127.0.0.1] ********************************************************************
TASK [Gathering Facts] **************************************************************
ok: [localhost]
TASK [debug] ************************************************************************
ok: [localhost] => {
"msg": "Jane Jolie"
}
```
This does not work as expected:
```console
$ ansible 127.0.0.1 -c local -m setup -a filter=ansible_local
localhost | SUCCESS => {
"ansible_facts": {
"ansible_local": {}
},
"changed": false
}
```
|
https://github.com/ansible/ansible/issues/72263
|
https://github.com/ansible/ansible/pull/76053
|
c53e6d94e987c27f60bed55a2dc8001ffcd10cd2
|
0b6d3312dd4dcc7a53b9027e07723cd5752a65ea
| 2020-10-20T12:04:47Z |
python
| 2022-01-10T22:25:00Z |
lib/ansible/config/base.yml
|
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
---
ALLOW_WORLD_READABLE_TMPFILES:
name: Allow world-readable temporary files
description:
- This setting has been moved to the individual shell plugins as a plugin option :ref:`shell_plugins`.
- The existing configuration settings are still accepted with the shell plugin adding additional options, like variables.
- This message will be removed in 2.14.
type: boolean
default: False
deprecated: # (kept for autodetection and removal, deprecation is irrelevant since w/o settings this can never show runtime msg)
why: moved to shell plugins
version: "2.14"
alternatives: 'world_readable_tmp'
ANSIBLE_CONNECTION_PATH:
name: Path of ansible-connection script
default: null
description:
- Specify where to look for the ansible-connection script. This location will be checked before searching $PATH.
- If null, ansible will start with the same directory as the ansible script.
type: path
env: [{name: ANSIBLE_CONNECTION_PATH}]
ini:
- {key: ansible_connection_path, section: persistent_connection}
yaml: {key: persistent_connection.ansible_connection_path}
version_added: "2.8"
ANSIBLE_COW_SELECTION:
name: Cowsay filter selection
default: default
description: This allows you to chose a specific cowsay stencil for the banners or use 'random' to cycle through them.
env: [{name: ANSIBLE_COW_SELECTION}]
ini:
- {key: cow_selection, section: defaults}
ANSIBLE_COW_ACCEPTLIST:
name: Cowsay filter acceptance list
default: ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'dragon', 'elephant-in-snake', 'elephant', 'eyes', 'hellokitty', 'kitty', 'luke-koala', 'meow', 'milk', 'moofasa', 'moose', 'ren', 'sheep', 'small', 'stegosaurus', 'stimpy', 'supermilker', 'three-eyes', 'turkey', 'turtle', 'tux', 'udder', 'vader-koala', 'vader', 'www']
description: White list of cowsay templates that are 'safe' to use, set to empty list if you want to enable all installed templates.
env:
- name: ANSIBLE_COW_WHITELIST
deprecated:
why: normalizing names to new standard
version: "2.15"
alternatives: 'ANSIBLE_COW_ACCEPTLIST'
- name: ANSIBLE_COW_ACCEPTLIST
version_added: '2.11'
ini:
- key: cow_whitelist
section: defaults
deprecated:
why: normalizing names to new standard
version: "2.15"
alternatives: 'cowsay_enabled_stencils'
- key: cowsay_enabled_stencils
section: defaults
version_added: '2.11'
type: list
ANSIBLE_FORCE_COLOR:
name: Force color output
default: False
description: This option forces color mode even when running without a TTY or the "nocolor" setting is True.
env: [{name: ANSIBLE_FORCE_COLOR}]
ini:
- {key: force_color, section: defaults}
type: boolean
yaml: {key: display.force_color}
ANSIBLE_NOCOLOR:
name: Suppress color output
default: False
description: This setting allows suppressing colorizing output, which is used to give a better indication of failure and status information.
env:
- name: ANSIBLE_NOCOLOR
# this is generic convention for CLI programs
- name: NO_COLOR
version_added: '2.11'
ini:
- {key: nocolor, section: defaults}
type: boolean
yaml: {key: display.nocolor}
ANSIBLE_NOCOWS:
name: Suppress cowsay output
default: False
description: If you have cowsay installed but want to avoid the 'cows' (why????), use this.
env: [{name: ANSIBLE_NOCOWS}]
ini:
- {key: nocows, section: defaults}
type: boolean
yaml: {key: display.i_am_no_fun}
ANSIBLE_COW_PATH:
name: Set path to cowsay command
default: null
description: Specify a custom cowsay path or swap in your cowsay implementation of choice
env: [{name: ANSIBLE_COW_PATH}]
ini:
- {key: cowpath, section: defaults}
type: string
yaml: {key: display.cowpath}
ANSIBLE_PIPELINING:
name: Connection pipelining
default: False
description:
- This is a global option, each connection plugin can override either by having more specific options or not supporting pipelining at all.
- Pipelining, if supported by the connection plugin, reduces the number of network operations required to execute a module on the remote server,
by executing many Ansible modules without actual file transfer.
- It can result in a very significant performance improvement when enabled.
- "However this conflicts with privilege escalation (become). For example, when using 'sudo:' operations you must first
disable 'requiretty' in /etc/sudoers on all managed hosts, which is why it is disabled by default."
- This setting will be disabled if ``ANSIBLE_KEEP_REMOTE_FILES`` is enabled.
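# Illustrative sudoers entry (not part of this setting) that disables requiretty for
# one user so pipelining can work: Defaults:deploy !requiretty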
env:
- name: ANSIBLE_PIPELINING
ini:
- section: defaults
key: pipelining
- section: connection
key: pipelining
type: boolean
ANY_ERRORS_FATAL:
name: Make Task failures fatal
default: False
description: Sets the default value for the any_errors_fatal keyword, if True, Task failures will be considered fatal errors.
env:
- name: ANSIBLE_ANY_ERRORS_FATAL
ini:
- section: defaults
key: any_errors_fatal
type: boolean
yaml: {key: errors.any_task_errors_fatal}
version_added: "2.4"
BECOME_ALLOW_SAME_USER:
name: Allow becoming the same user
default: False
description:
- This setting controls if become is skipped when remote user and become user are the same, i.e. root sudo to root.
env: [{name: ANSIBLE_BECOME_ALLOW_SAME_USER}]
ini:
- {key: become_allow_same_user, section: privilege_escalation}
type: boolean
yaml: {key: privilege_escalation.become_allow_same_user}
BECOME_PASSWORD_FILE:
name: Become password file
default: ~
description:
- 'The password file to use for the become plugin. --become-password-file.'
- If executable, it will be run and the resulting stdout will be used as the password.
env: [{name: ANSIBLE_BECOME_PASSWORD_FILE}]
ini:
- {key: become_password_file, section: defaults}
type: path
version_added: '2.12'
AGNOSTIC_BECOME_PROMPT:
name: Display an agnostic become prompt
default: True
type: boolean
description: Display an agnostic become prompt instead of displaying a prompt containing the command line supplied become method
env: [{name: ANSIBLE_AGNOSTIC_BECOME_PROMPT}]
ini:
- {key: agnostic_become_prompt, section: privilege_escalation}
yaml: {key: privilege_escalation.agnostic_become_prompt}
version_added: "2.5"
CACHE_PLUGIN:
name: Persistent Cache plugin
default: memory
description: Chooses which cache plugin to use, the default 'memory' is ephemeral.
env: [{name: ANSIBLE_CACHE_PLUGIN}]
ini:
- {key: fact_caching, section: defaults}
yaml: {key: facts.cache.plugin}
CACHE_PLUGIN_CONNECTION:
name: Cache Plugin URI
default: ~
description: Defines connection or path information for the cache plugin
env: [{name: ANSIBLE_CACHE_PLUGIN_CONNECTION}]
ini:
- {key: fact_caching_connection, section: defaults}
yaml: {key: facts.cache.uri}
CACHE_PLUGIN_PREFIX:
name: Cache Plugin table prefix
default: ansible_facts
description: Prefix to use for cache plugin files/tables
env: [{name: ANSIBLE_CACHE_PLUGIN_PREFIX}]
ini:
- {key: fact_caching_prefix, section: defaults}
yaml: {key: facts.cache.prefix}
CACHE_PLUGIN_TIMEOUT:
name: Cache Plugin expiration timeout
default: 86400
description: Expiration timeout for the cache plugin data
env: [{name: ANSIBLE_CACHE_PLUGIN_TIMEOUT}]
ini:
- {key: fact_caching_timeout, section: defaults}
type: integer
yaml: {key: facts.cache.timeout}
COLLECTIONS_SCAN_SYS_PATH:
name: Scan PYTHONPATH for installed collections
description: A boolean to enable or disable scanning the sys.path for installed collections
default: true
type: boolean
env:
- {name: ANSIBLE_COLLECTIONS_SCAN_SYS_PATH}
ini:
- {key: collections_scan_sys_path, section: defaults}
COLLECTIONS_PATHS:
name: ordered list of root paths for loading installed Ansible collections content
description: >
Colon separated paths in which Ansible will search for collections content.
Collections must be in nested *subdirectories*, not directly in these directories.
For example, if ``COLLECTIONS_PATHS`` includes ``~/.ansible/collections``,
and you want to add ``my.collection`` to that directory, it must be saved as
``~/.ansible/collections/ansible_collections/my/collection``.
default: ~/.ansible/collections:/usr/share/ansible/collections
type: pathspec
env:
- name: ANSIBLE_COLLECTIONS_PATHS # TODO: Deprecate this and ini once PATH has been in a few releases.
- name: ANSIBLE_COLLECTIONS_PATH
version_added: '2.10'
ini:
- key: collections_paths
section: defaults
- key: collections_path
section: defaults
version_added: '2.10'
COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH:
name: Defines behavior when loading a collection that does not support the current Ansible version
description:
- When a collection is loaded that does not support the running Ansible version (via the collection metadata key
`requires_ansible`), the default behavior is to issue a warning and continue anyway. Setting this value to `ignore`
skips the warning entirely, while setting it to `error` will immediately halt Ansible execution.
env: [{name: ANSIBLE_COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH}]
ini: [{key: collections_on_ansible_version_mismatch, section: defaults}]
choices: [error, warning, ignore]
default: warning
_COLOR_DEFAULTS: &color
name: placeholder for color settings' defaults
choices: ['black', 'bright gray', 'blue', 'white', 'green', 'bright blue', 'cyan', 'bright green', 'red', 'bright cyan', 'purple', 'bright red', 'yellow', 'bright purple', 'dark gray', 'bright yellow', 'magenta', 'bright magenta', 'normal']
COLOR_CHANGED:
<<: *color
name: Color for 'changed' task status
default: yellow
description: Defines the color to use on 'Changed' task status
env: [{name: ANSIBLE_COLOR_CHANGED}]
ini:
- {key: changed, section: colors}
COLOR_CONSOLE_PROMPT:
<<: *color
name: "Color for ansible-console's prompt task status"
default: white
description: Defines the default color to use for ansible-console
env: [{name: ANSIBLE_COLOR_CONSOLE_PROMPT}]
ini:
- {key: console_prompt, section: colors}
version_added: "2.7"
COLOR_DEBUG:
<<: *color
name: Color for debug statements
default: dark gray
description: Defines the color to use when emitting debug messages
env: [{name: ANSIBLE_COLOR_DEBUG}]
ini:
- {key: debug, section: colors}
COLOR_DEPRECATE:
<<: *color
name: Color for deprecation messages
default: purple
description: Defines the color to use when emitting deprecation messages
env: [{name: ANSIBLE_COLOR_DEPRECATE}]
ini:
- {key: deprecate, section: colors}
COLOR_DIFF_ADD:
<<: *color
name: Color for diff added display
default: green
description: Defines the color to use when showing added lines in diffs
env: [{name: ANSIBLE_COLOR_DIFF_ADD}]
ini:
- {key: diff_add, section: colors}
yaml: {key: display.colors.diff.add}
COLOR_DIFF_LINES:
<<: *color
name: Color for diff lines display
default: cyan
description: Defines the color to use when showing diffs
env: [{name: ANSIBLE_COLOR_DIFF_LINES}]
ini:
- {key: diff_lines, section: colors}
COLOR_DIFF_REMOVE:
<<: *color
name: Color for diff removed display
default: red
description: Defines the color to use when showing removed lines in diffs
env: [{name: ANSIBLE_COLOR_DIFF_REMOVE}]
ini:
- {key: diff_remove, section: colors}
COLOR_ERROR:
<<: *color
name: Color for error messages
default: red
description: Defines the color to use when emitting error messages
env: [{name: ANSIBLE_COLOR_ERROR}]
ini:
- {key: error, section: colors}
yaml: {key: colors.error}
COLOR_HIGHLIGHT:
<<: *color
name: Color for highlighting
default: white
description: Defines the color to use for highlighting
env: [{name: ANSIBLE_COLOR_HIGHLIGHT}]
ini:
- {key: highlight, section: colors}
COLOR_OK:
<<: *color
name: Color for 'ok' task status
default: green
description: Defines the color to use when showing 'OK' task status
env: [{name: ANSIBLE_COLOR_OK}]
ini:
- {key: ok, section: colors}
COLOR_SKIP:
<<: *color
name: Color for 'skip' task status
default: cyan
description: Defines the color to use when showing 'Skipped' task status
env: [{name: ANSIBLE_COLOR_SKIP}]
ini:
- {key: skip, section: colors}
COLOR_UNREACHABLE:
<<: *color
name: Color for 'unreachable' host state
default: bright red
description: Defines the color to use on 'Unreachable' status
env: [{name: ANSIBLE_COLOR_UNREACHABLE}]
ini:
- {key: unreachable, section: colors}
COLOR_VERBOSE:
<<: *color
name: Color for verbose messages
default: blue
description: Defines the color to use when emitting verbose messages, i.e. those that show with '-v's.
env: [{name: ANSIBLE_COLOR_VERBOSE}]
ini:
- {key: verbose, section: colors}
COLOR_WARN:
<<: *color
name: Color for warning messages
default: bright purple
description: Defines the color to use when emitting warning messages
env: [{name: ANSIBLE_COLOR_WARN}]
ini:
- {key: warn, section: colors}
CONNECTION_PASSWORD_FILE:
name: Connection password file
default: ~
description: 'The password file to use for the connection plugin. --connection-password-file.'
env: [{name: ANSIBLE_CONNECTION_PASSWORD_FILE}]
ini:
- {key: connection_password_file, section: defaults}
type: path
version_added: '2.12'
COVERAGE_REMOTE_OUTPUT:
name: Sets the output directory and filename prefix to generate coverage run info.
description:
- Sets the output directory on the remote host to generate coverage reports to.
- Currently only used for remote coverage on PowerShell modules.
- This is for internal use only.
env:
- {name: _ANSIBLE_COVERAGE_REMOTE_OUTPUT}
vars:
- {name: _ansible_coverage_remote_output}
type: str
version_added: '2.9'
COVERAGE_REMOTE_PATHS:
name: Sets the list of paths to run coverage for.
description:
- A list of paths for files on the Ansible controller to run coverage for when executing on the remote host.
- Only files that match the path glob will have its coverage collected.
- Multiple path globs can be specified and are separated by ``:``.
- Currently only used for remote coverage on PowerShell modules.
- This is for internal use only.
default: '*'
env:
- {name: _ANSIBLE_COVERAGE_REMOTE_PATH_FILTER}
type: str
version_added: '2.9'
ACTION_WARNINGS:
name: Toggle action warnings
default: True
description:
- By default Ansible will issue a warning when received from a task action (module or action plugin)
- These warnings can be silenced by adjusting this setting to False.
env: [{name: ANSIBLE_ACTION_WARNINGS}]
ini:
- {key: action_warnings, section: defaults}
type: boolean
version_added: "2.5"
COMMAND_WARNINGS:
name: Command module warnings
default: False
description:
- Ansible can issue a warning when the shell or command module is used and the command appears to be similar to an existing Ansible module.
- These warnings can be silenced by adjusting this setting to False. You can also control this at the task level with the module option ``warn``.
- As of version 2.11, this is disabled by default.
env: [{name: ANSIBLE_COMMAND_WARNINGS}]
ini:
- {key: command_warnings, section: defaults}
type: boolean
version_added: "1.8"
deprecated:
why: the command warnings feature is being removed
version: "2.14"
LOCALHOST_WARNING:
name: Warning when using implicit inventory with only localhost
default: True
description:
- By default Ansible will issue a warning when there are no hosts in the
inventory.
- These warnings can be silenced by adjusting this setting to False.
env: [{name: ANSIBLE_LOCALHOST_WARNING}]
ini:
- {key: localhost_warning, section: defaults}
type: boolean
version_added: "2.6"
DOC_FRAGMENT_PLUGIN_PATH:
name: documentation fragment plugins path
default: ~/.ansible/plugins/doc_fragments:/usr/share/ansible/plugins/doc_fragments
description: Colon separated paths in which Ansible will search for Documentation Fragments Plugins.
env: [{name: ANSIBLE_DOC_FRAGMENT_PLUGINS}]
ini:
- {key: doc_fragment_plugins, section: defaults}
type: pathspec
DEFAULT_ACTION_PLUGIN_PATH:
name: Action plugins path
default: ~/.ansible/plugins/action:/usr/share/ansible/plugins/action
description: Colon separated paths in which Ansible will search for Action Plugins.
env: [{name: ANSIBLE_ACTION_PLUGINS}]
ini:
- {key: action_plugins, section: defaults}
type: pathspec
yaml: {key: plugins.action.path}
DEFAULT_ALLOW_UNSAFE_LOOKUPS:
name: Allow unsafe lookups
default: False
description:
- "When enabled, this option allows lookup plugins (whether used in variables as ``{{lookup('foo')}}`` or as a loop as with_foo)
to return data that is not marked 'unsafe'."
- By default, such data is marked as unsafe to prevent the templating engine from evaluating any jinja2 templating language,
as this could represent a security risk. This option is provided to allow for backward compatibility,
however users should first consider adding allow_unsafe=True to any lookups which may be expected to contain data which may be run
through the templating engine later.
env: []
ini:
- {key: allow_unsafe_lookups, section: defaults}
type: boolean
version_added: "2.2.3"
DEFAULT_ASK_PASS:
name: Ask for the login password
default: False
description:
- This controls whether an Ansible playbook should prompt for a login password.
If using SSH keys for authentication, you probably do not need to change this setting.
env: [{name: ANSIBLE_ASK_PASS}]
ini:
- {key: ask_pass, section: defaults}
type: boolean
yaml: {key: defaults.ask_pass}
DEFAULT_ASK_VAULT_PASS:
name: Ask for the vault password(s)
default: False
description:
- This controls whether an Ansible playbook should prompt for a vault password.
env: [{name: ANSIBLE_ASK_VAULT_PASS}]
ini:
- {key: ask_vault_pass, section: defaults}
type: boolean
DEFAULT_BECOME:
name: Enable privilege escalation (become)
default: False
description: Toggles the use of privilege escalation, allowing you to 'become' another user after login.
env: [{name: ANSIBLE_BECOME}]
ini:
- {key: become, section: privilege_escalation}
type: boolean
DEFAULT_BECOME_ASK_PASS:
name: Ask for the privilege escalation (become) password
default: False
description: Toggle to prompt for privilege escalation password.
env: [{name: ANSIBLE_BECOME_ASK_PASS}]
ini:
- {key: become_ask_pass, section: privilege_escalation}
type: boolean
DEFAULT_BECOME_METHOD:
name: Choose privilege escalation method
default: 'sudo'
description: Privilege escalation method to use when `become` is enabled.
env: [{name: ANSIBLE_BECOME_METHOD}]
ini:
- {section: privilege_escalation, key: become_method}
DEFAULT_BECOME_EXE:
name: Choose 'become' executable
default: ~
description: 'executable to use for privilege escalation, otherwise Ansible will depend on PATH'
env: [{name: ANSIBLE_BECOME_EXE}]
ini:
- {key: become_exe, section: privilege_escalation}
DEFAULT_BECOME_FLAGS:
name: Set 'become' executable options
default: ~
description: Flags to pass to the privilege escalation executable.
env: [{name: ANSIBLE_BECOME_FLAGS}]
ini:
- {key: become_flags, section: privilege_escalation}
BECOME_PLUGIN_PATH:
name: Become plugins path
default: ~/.ansible/plugins/become:/usr/share/ansible/plugins/become
description: Colon separated paths in which Ansible will search for Become Plugins.
env: [{name: ANSIBLE_BECOME_PLUGINS}]
ini:
- {key: become_plugins, section: defaults}
type: pathspec
version_added: "2.8"
DEFAULT_BECOME_USER:
# FIXME: should really be blank and make -u passing optional depending on it
name: Set the user you 'become' via privilege escalation
default: root
description: The user your login/remote user 'becomes' when using privilege escalation, most systems will use 'root' when no user is specified.
env: [{name: ANSIBLE_BECOME_USER}]
ini:
- {key: become_user, section: privilege_escalation}
yaml: {key: become.user}
DEFAULT_CACHE_PLUGIN_PATH:
name: Cache Plugins Path
default: ~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache
description: Colon separated paths in which Ansible will search for Cache Plugins.
env: [{name: ANSIBLE_CACHE_PLUGINS}]
ini:
- {key: cache_plugins, section: defaults}
type: pathspec
DEFAULT_CALLBACK_PLUGIN_PATH:
name: Callback Plugins Path
default: ~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback
description: Colon separated paths in which Ansible will search for Callback Plugins.
env: [{name: ANSIBLE_CALLBACK_PLUGINS}]
ini:
- {key: callback_plugins, section: defaults}
type: pathspec
yaml: {key: plugins.callback.path}
CALLBACKS_ENABLED:
name: Enable callback plugins that require it.
default: []
description:
- "List of enabled callbacks, not all callbacks need enabling,
but many of those shipped with Ansible do as we don't want them activated by default."
env:
- name: ANSIBLE_CALLBACK_WHITELIST
deprecated:
why: normalizing names to new standard
version: "2.15"
alternatives: 'ANSIBLE_CALLBACKS_ENABLED'
- name: ANSIBLE_CALLBACKS_ENABLED
version_added: '2.11'
ini:
- key: callback_whitelist
section: defaults
deprecated:
why: normalizing names to new standard
version: "2.15"
alternatives: 'callbacks_enabled'
- key: callbacks_enabled
section: defaults
version_added: '2.11'
type: list
DEFAULT_CLICONF_PLUGIN_PATH:
name: Cliconf Plugins Path
default: ~/.ansible/plugins/cliconf:/usr/share/ansible/plugins/cliconf
description: Colon separated paths in which Ansible will search for Cliconf Plugins.
env: [{name: ANSIBLE_CLICONF_PLUGINS}]
ini:
- {key: cliconf_plugins, section: defaults}
type: pathspec
DEFAULT_CONNECTION_PLUGIN_PATH:
name: Connection Plugins Path
default: ~/.ansible/plugins/connection:/usr/share/ansible/plugins/connection
description: Colon separated paths in which Ansible will search for Connection Plugins.
env: [{name: ANSIBLE_CONNECTION_PLUGINS}]
ini:
- {key: connection_plugins, section: defaults}
type: pathspec
yaml: {key: plugins.connection.path}
DEFAULT_DEBUG:
name: Debug mode
default: False
description:
- "Toggles debug output in Ansible. This is *very* verbose and can hinder
multiprocessing. Debug output can also include secret information
despite no_log settings being enabled, which means debug mode should not be used in
production."
env: [{name: ANSIBLE_DEBUG}]
ini:
- {key: debug, section: defaults}
type: boolean
DEFAULT_EXECUTABLE:
name: Target shell executable
default: /bin/sh
description:
- "This indicates the command to use to spawn a shell under for Ansible's execution needs on a target.
Users may need to change this in rare instances when shell usage is constrained, but in most cases it may be left as is."
env: [{name: ANSIBLE_EXECUTABLE}]
ini:
- {key: executable, section: defaults}
DEFAULT_FACT_PATH:
name: local fact path
default: ~
description:
- "This option allows you to globally configure a custom path for 'local_facts' for the implied M(ansible.builtin.setup) task when using fact gathering."
- "If not set, it will fallback to the default from the M(ansible.builtin.setup) module: ``/etc/ansible/facts.d``."
- "This does **not** affect user defined tasks that use the M(ansible.builtin.setup) module."
env: [{name: ANSIBLE_FACT_PATH}]
ini:
- {key: fact_path, section: defaults}
type: string
yaml: {key: facts.gathering.fact_path}
DEFAULT_FILTER_PLUGIN_PATH:
name: Jinja2 Filter Plugins Path
default: ~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter
description: Colon separated paths in which Ansible will search for Jinja2 Filter Plugins.
env: [{name: ANSIBLE_FILTER_PLUGINS}]
ini:
- {key: filter_plugins, section: defaults}
type: pathspec
DEFAULT_FORCE_HANDLERS:
name: Force handlers to run after failure
default: False
description:
- This option controls if notified handlers run on a host even if a failure occurs on that host.
- When false, the handlers will not run if a failure has occurred on a host.
- This can also be set per play or on the command line. See Handlers and Failure for more details.
env: [{name: ANSIBLE_FORCE_HANDLERS}]
ini:
- {key: force_handlers, section: defaults}
type: boolean
version_added: "1.9.1"
DEFAULT_FORKS:
name: Number of task forks
default: 5
description: Maximum number of forks Ansible will use to execute tasks on target hosts.
env: [{name: ANSIBLE_FORKS}]
ini:
- {key: forks, section: defaults}
type: integer
DEFAULT_GATHERING:
name: Gathering behaviour
default: 'implicit'
description:
- This setting controls the default policy of fact gathering (facts discovered about remote systems).
- "When 'implicit' (the default), the cache plugin will be ignored and facts will be gathered per play unless 'gather_facts: False' is set."
- "When 'explicit' the inverse is true, facts will not be gathered unless directly requested in the play."
- "The 'smart' value means each new host that has no facts discovered will be scanned,
but if the same host is addressed in multiple plays it will not be contacted again in the playbook run."
- "This option can be useful for those wishing to save fact gathering time. Both 'smart' and 'explicit' will use the cache plugin."
env: [{name: ANSIBLE_GATHERING}]
ini:
- key: gathering
section: defaults
version_added: "1.6"
choices: ['smart', 'explicit', 'implicit']
DEFAULT_GATHER_SUBSET:
name: Gather facts subset
default: ['all']
description:
- Set the `gather_subset` option for the M(ansible.builtin.setup) task in the implicit fact gathering.
See the module documentation for specifics.
- "It does **not** apply to user defined M(ansible.builtin.setup) tasks."
env: [{name: ANSIBLE_GATHER_SUBSET}]
ini:
- key: gather_subset
section: defaults
version_added: "2.1"
type: list
DEFAULT_GATHER_TIMEOUT:
name: Gather facts timeout
default: 10
description:
- Set the timeout in seconds for the implicit fact gathering.
- "It does **not** apply to user defined M(ansible.builtin.setup) tasks."
env: [{name: ANSIBLE_GATHER_TIMEOUT}]
ini:
- {key: gather_timeout, section: defaults}
type: integer
yaml: {key: defaults.gather_timeout}
DEFAULT_HASH_BEHAVIOUR:
name: Hash merge behaviour
default: replace
type: string
choices:
replace: Any variable that is defined more than once is overwritten using the order from variable precedence rules (highest wins).
merge: Any dictionary variable will be recursively merged with new definitions across the different variable definition sources.
description:
- This setting controls how duplicate definitions of dictionary variables (aka hash, map, associative array) are handled in Ansible.
- This does not affect variables whose values are scalars (integers, strings) or arrays.
- "**WARNING**, changing this setting is not recommended as this is fragile and makes your content (plays, roles, collections) non portable,
leading to continual confusion and misuse. Don't change this setting unless you think you have an absolute need for it."
- We recommend avoiding reusing variable names and relying on the ``combine`` filter and ``vars`` and ``varnames`` lookups
to create merged versions of the individual variables. In our experience this is rarely really needed and a sign that too much
complexity has been introduced into the data structures and plays.
- For some uses you can also look into custom vars_plugins to merge on input, even substituting the default ``host_group_vars``
that is in charge of parsing the ``host_vars/`` and ``group_vars/`` directories. Most users of this setting are only interested in inventory scope,
but the setting itself affects all sources and makes debugging even harder.
- All playbooks and roles in the official examples repos assume the default for this setting.
- Changing the setting to ``merge`` applies across variable sources, but many sources will internally still overwrite the variables.
For example ``include_vars`` will dedupe variables internally before updating Ansible, with 'last defined' overwriting previous definitions in same file.
- The Ansible project recommends you **avoid ``merge`` for new projects.**
- It is the intention of the Ansible developers to eventually deprecate and remove this setting, but it is being kept as some users do heavily rely on it.
env: [{name: ANSIBLE_HASH_BEHAVIOUR}]
ini:
- {key: hash_behaviour, section: defaults}
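# Illustrative only: the explicit merge recommended above, done with the ``combine``
# filter in a play instead of enabling global hash merging:
#   merged_settings: "{{ default_settings | combine(host_settings, recursive=True) }}"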
DEFAULT_HOST_LIST:
name: Inventory Source
default: /etc/ansible/hosts
description: Comma separated list of Ansible inventory sources
env:
- name: ANSIBLE_INVENTORY
expand_relative_paths: True
ini:
- key: inventory
section: defaults
type: pathlist
yaml: {key: defaults.inventory}
DEFAULT_HTTPAPI_PLUGIN_PATH:
name: HttpApi Plugins Path
default: ~/.ansible/plugins/httpapi:/usr/share/ansible/plugins/httpapi
description: Colon separated paths in which Ansible will search for HttpApi Plugins.
env: [{name: ANSIBLE_HTTPAPI_PLUGINS}]
ini:
- {key: httpapi_plugins, section: defaults}
type: pathspec
DEFAULT_INTERNAL_POLL_INTERVAL:
name: Internal poll interval
default: 0.001
env: []
ini:
- {key: internal_poll_interval, section: defaults}
type: float
version_added: "2.2"
description:
- This sets the interval (in seconds) of Ansible internal processes polling each other.
Lower values improve performance with large playbooks at the expense of extra CPU load.
Higher values are more suitable for Ansible usage in automation scenarios,
when UI responsiveness is not required but CPU usage might be a concern.
- "The default corresponds to the value hardcoded in Ansible <= 2.1"
DEFAULT_INVENTORY_PLUGIN_PATH:
name: Inventory Plugins Path
default: ~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory
description: Colon separated paths in which Ansible will search for Inventory Plugins.
env: [{name: ANSIBLE_INVENTORY_PLUGINS}]
ini:
- {key: inventory_plugins, section: defaults}
type: pathspec
DEFAULT_JINJA2_EXTENSIONS:
name: Enabled Jinja2 extensions
default: []
description:
- This is a developer-specific feature that allows enabling additional Jinja2 extensions.
- "See the Jinja2 documentation for details. If you do not know what these do, you probably don't need to change this setting :)"
env: [{name: ANSIBLE_JINJA2_EXTENSIONS}]
ini:
- {key: jinja2_extensions, section: defaults}
DEFAULT_JINJA2_NATIVE:
name: Use Jinja2's NativeEnvironment for templating
default: False
description: This option preserves variable types during template operations.
env: [{name: ANSIBLE_JINJA2_NATIVE}]
ini:
- {key: jinja2_native, section: defaults}
type: boolean
yaml: {key: jinja2_native}
version_added: 2.7
DEFAULT_KEEP_REMOTE_FILES:
name: Keep remote files
default: False
description:
- Enables/disables the cleaning up of the temporary files Ansible used to execute the tasks on the remote.
- If this option is enabled it will disable ``ANSIBLE_PIPELINING``.
env: [{name: ANSIBLE_KEEP_REMOTE_FILES}]
ini:
- {key: keep_remote_files, section: defaults}
type: boolean
DEFAULT_LIBVIRT_LXC_NOSECLABEL:
# TODO: move to plugin
name: No security label on Lxc
default: False
description:
- "This setting causes libvirt to connect to lxc containers by passing --noseclabel to virsh.
This is necessary when running on systems which do not have SELinux."
env:
- name: LIBVIRT_LXC_NOSECLABEL
deprecated:
why: environment variables without ``ANSIBLE_`` prefix are deprecated
version: "2.12"
alternatives: the ``ANSIBLE_LIBVIRT_LXC_NOSECLABEL`` environment variable
- name: ANSIBLE_LIBVIRT_LXC_NOSECLABEL
ini:
- {key: libvirt_lxc_noseclabel, section: selinux}
type: boolean
version_added: "2.1"
DEFAULT_LOAD_CALLBACK_PLUGINS:
name: Load callbacks for adhoc
default: False
description:
- Controls whether callback plugins are loaded when running /usr/bin/ansible.
This may be used to log activity from the command line, send notifications, and so on.
Callback plugins are always loaded for ``ansible-playbook``.
env: [{name: ANSIBLE_LOAD_CALLBACK_PLUGINS}]
ini:
- {key: bin_ansible_callbacks, section: defaults}
type: boolean
version_added: "1.8"
DEFAULT_LOCAL_TMP:
name: Controller temporary directory
default: ~/.ansible/tmp
description: Temporary directory for Ansible to use on the controller.
env: [{name: ANSIBLE_LOCAL_TEMP}]
ini:
- {key: local_tmp, section: defaults}
type: tmppath
DEFAULT_LOG_PATH:
name: Ansible log file path
default: ~
description: File to which Ansible will log on the controller. When empty logging is disabled.
env: [{name: ANSIBLE_LOG_PATH}]
ini:
- {key: log_path, section: defaults}
type: path
DEFAULT_LOG_FILTER:
name: Name filters for python logger
default: []
description: List of logger names to filter out of the log file
env: [{name: ANSIBLE_LOG_FILTER}]
ini:
- {key: log_filter, section: defaults}
type: list
DEFAULT_LOOKUP_PLUGIN_PATH:
name: Lookup Plugins Path
description: Colon separated paths in which Ansible will search for Lookup Plugins.
default: ~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup
env: [{name: ANSIBLE_LOOKUP_PLUGINS}]
ini:
- {key: lookup_plugins, section: defaults}
type: pathspec
yaml: {key: defaults.lookup_plugins}
DEFAULT_MANAGED_STR:
name: Ansible managed
default: 'Ansible managed'
description: Sets the macro for the 'ansible_managed' variable available for M(ansible.builtin.template) and M(ansible.windows.win_template) modules. This is only relevant for those two modules.
env: []
ini:
- {key: ansible_managed, section: defaults}
yaml: {key: defaults.ansible_managed}
DEFAULT_MODULE_ARGS:
name: Adhoc default arguments
default: ~
description:
- This sets the default arguments to pass to the ``ansible`` adhoc binary if no ``-a`` is specified.
env: [{name: ANSIBLE_MODULE_ARGS}]
ini:
- {key: module_args, section: defaults}
DEFAULT_MODULE_COMPRESSION:
name: Python module compression
default: ZIP_DEFLATED
description: Compression scheme to use when transferring Python modules to the target.
env: []
ini:
- {key: module_compression, section: defaults}
# vars:
# - name: ansible_module_compression
DEFAULT_MODULE_NAME:
name: Default adhoc module
default: command
description: "Module to use with the ``ansible`` AdHoc command, if none is specified via ``-m``."
env: []
ini:
- {key: module_name, section: defaults}
DEFAULT_MODULE_PATH:
name: Modules Path
description: Colon separated paths in which Ansible will search for Modules.
default: ~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules
env: [{name: ANSIBLE_LIBRARY}]
ini:
- {key: library, section: defaults}
type: pathspec
DEFAULT_MODULE_UTILS_PATH:
name: Module Utils Path
description: Colon separated paths in which Ansible will search for Module utils files, which are shared by modules.
default: ~/.ansible/plugins/module_utils:/usr/share/ansible/plugins/module_utils
env: [{name: ANSIBLE_MODULE_UTILS}]
ini:
- {key: module_utils, section: defaults}
type: pathspec
DEFAULT_NETCONF_PLUGIN_PATH:
name: Netconf Plugins Path
default: ~/.ansible/plugins/netconf:/usr/share/ansible/plugins/netconf
description: Colon separated paths in which Ansible will search for Netconf Plugins.
env: [{name: ANSIBLE_NETCONF_PLUGINS}]
ini:
- {key: netconf_plugins, section: defaults}
type: pathspec
DEFAULT_NO_LOG:
name: No log
default: False
description: "Toggle Ansible's display and logging of task details, mainly used to avoid security disclosures."
env: [{name: ANSIBLE_NO_LOG}]
ini:
- {key: no_log, section: defaults}
type: boolean
DEFAULT_NO_TARGET_SYSLOG:
name: No syslog on target
default: False
description:
- Toggle Ansible logging to syslog on the target when it executes tasks. On Windows hosts this will prevent
newer-style PowerShell modules from writing to the event log.
env: [{name: ANSIBLE_NO_TARGET_SYSLOG}]
ini:
- {key: no_target_syslog, section: defaults}
vars:
- name: ansible_no_target_syslog
version_added: '2.10'
type: boolean
yaml: {key: defaults.no_target_syslog}
DEFAULT_NULL_REPRESENTATION:
name: Represent a null
default: ~
description: What templating should return as a 'null' value. When not set it will let Jinja2 decide.
env: [{name: ANSIBLE_NULL_REPRESENTATION}]
ini:
- {key: null_representation, section: defaults}
type: none
DEFAULT_POLL_INTERVAL:
name: Async poll interval
default: 15
description:
- For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling),
this is how often to check back on the status of those tasks when an explicit poll interval is not supplied.
The default is a reasonably moderate 15 seconds which is a tradeoff between checking in frequently and
providing a quick turnaround when something may have completed.
env: [{name: ANSIBLE_POLL_INTERVAL}]
ini:
- {key: poll_interval, section: defaults}
type: integer
DEFAULT_PRIVATE_KEY_FILE:
name: Private key file
default: ~
description:
- Option for connections using a certificate or key file to authenticate, rather than an agent or passwords,
you can set the default value here to avoid re-specifying --private-key with every invocation.
env: [{name: ANSIBLE_PRIVATE_KEY_FILE}]
ini:
- {key: private_key_file, section: defaults}
type: path
DEFAULT_PRIVATE_ROLE_VARS:
name: Private role variables
default: False
description:
- Makes role variables inaccessible from other roles.
- This was introduced as a way to reset role variables to default values if
a role is used more than once in a playbook.
env: [{name: ANSIBLE_PRIVATE_ROLE_VARS}]
ini:
- {key: private_role_vars, section: defaults}
type: boolean
yaml: {key: defaults.private_role_vars}
DEFAULT_REMOTE_PORT:
name: Remote port
default: ~
description: Port to use in remote connections, when blank it will use the connection plugin default.
env: [{name: ANSIBLE_REMOTE_PORT}]
ini:
- {key: remote_port, section: defaults}
type: integer
yaml: {key: defaults.remote_port}
DEFAULT_REMOTE_USER:
name: Login/Remote User
description:
- Sets the login user for the target machines
- "When blank it uses the connection plugin's default, normally the user currently executing Ansible."
env: [{name: ANSIBLE_REMOTE_USER}]
ini:
- {key: remote_user, section: defaults}
DEFAULT_ROLES_PATH:
name: Roles path
default: ~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
description: Colon separated paths in which Ansible will search for Roles.
env: [{name: ANSIBLE_ROLES_PATH}]
expand_relative_paths: True
ini:
- {key: roles_path, section: defaults}
type: pathspec
yaml: {key: defaults.roles_path}
DEFAULT_SELINUX_SPECIAL_FS:
name: Problematic file systems
default: fuse, nfs, vboxsf, ramfs, 9p, vfat
description:
- "Some filesystems do not support safe operations and/or return inconsistent errors,
this setting makes Ansible 'tolerate' those in the list w/o causing fatal errors."
- Data corruption may occur and writes are not always verified when a filesystem is in the list.
env:
- name: ANSIBLE_SELINUX_SPECIAL_FS
version_added: "2.9"
ini:
- {key: special_context_filesystems, section: selinux}
type: list
DEFAULT_STDOUT_CALLBACK:
name: Main display callback plugin
default: default
description:
- "Set the main callback used to display Ansible output, you can only have one at a time."
- You can have many other callbacks, but just one can be in charge of stdout.
env: [{name: ANSIBLE_STDOUT_CALLBACK}]
ini:
- {key: stdout_callback, section: defaults}
ENABLE_TASK_DEBUGGER:
name: Whether to enable the task debugger
default: False
description:
    - Whether or not to enable the task debugger; this previously was done as a strategy plugin.
    - Now all strategy plugins can inherit this behavior. The debugger defaults to activating when
      a task is failed on unreachable. Use the debugger keyword for more flexibility.
type: boolean
env: [{name: ANSIBLE_ENABLE_TASK_DEBUGGER}]
ini:
- {key: enable_task_debugger, section: defaults}
version_added: "2.5"
TASK_DEBUGGER_IGNORE_ERRORS:
name: Whether a failed task with ignore_errors=True will still invoke the debugger
default: True
description:
- This option defines whether the task debugger will be invoked on a failed task when ignore_errors=True
is specified.
- True specifies that the debugger will honor ignore_errors, False will not honor ignore_errors.
type: boolean
env: [{name: ANSIBLE_TASK_DEBUGGER_IGNORE_ERRORS}]
ini:
- {key: task_debugger_ignore_errors, section: defaults}
version_added: "2.7"
DEFAULT_STRATEGY:
name: Implied strategy
default: 'linear'
description: Set the default strategy used for plays.
env: [{name: ANSIBLE_STRATEGY}]
ini:
- {key: strategy, section: defaults}
version_added: "2.3"
DEFAULT_STRATEGY_PLUGIN_PATH:
name: Strategy Plugins Path
description: Colon separated paths in which Ansible will search for Strategy Plugins.
default: ~/.ansible/plugins/strategy:/usr/share/ansible/plugins/strategy
env: [{name: ANSIBLE_STRATEGY_PLUGINS}]
ini:
- {key: strategy_plugins, section: defaults}
type: pathspec
DEFAULT_SU:
default: False
description: 'Toggle the use of "su" for tasks.'
env: [{name: ANSIBLE_SU}]
ini:
- {key: su, section: defaults}
type: boolean
yaml: {key: defaults.su}
DEFAULT_SYSLOG_FACILITY:
name: syslog facility
default: LOG_USER
description: Syslog facility to use when Ansible logs to the remote target
env: [{name: ANSIBLE_SYSLOG_FACILITY}]
ini:
- {key: syslog_facility, section: defaults}
DEFAULT_TERMINAL_PLUGIN_PATH:
name: Terminal Plugins Path
default: ~/.ansible/plugins/terminal:/usr/share/ansible/plugins/terminal
description: Colon separated paths in which Ansible will search for Terminal Plugins.
env: [{name: ANSIBLE_TERMINAL_PLUGINS}]
ini:
- {key: terminal_plugins, section: defaults}
type: pathspec
DEFAULT_TEST_PLUGIN_PATH:
name: Jinja2 Test Plugins Path
description: Colon separated paths in which Ansible will search for Jinja2 Test Plugins.
default: ~/.ansible/plugins/test:/usr/share/ansible/plugins/test
env: [{name: ANSIBLE_TEST_PLUGINS}]
ini:
- {key: test_plugins, section: defaults}
type: pathspec
DEFAULT_TIMEOUT:
name: Connection timeout
default: 10
description: This is the default timeout for connection plugins to use.
env: [{name: ANSIBLE_TIMEOUT}]
ini:
- {key: timeout, section: defaults}
type: integer
DEFAULT_TRANSPORT:
# note that ssh_utils refs this and needs to be updated if removed
name: Connection plugin
default: smart
description: "Default connection plugin to use, the 'smart' option will toggle between 'ssh' and 'paramiko' depending on controller OS and ssh versions"
env: [{name: ANSIBLE_TRANSPORT}]
ini:
- {key: transport, section: defaults}
DEFAULT_UNDEFINED_VAR_BEHAVIOR:
name: Jinja2 fail on undefined
default: True
version_added: "1.3"
description:
- When True, this causes ansible templating to fail steps that reference variable names that are likely typoed.
- "Otherwise, any '{{ template_expression }}' that contains undefined variables will be rendered in a template or ansible action line exactly as written."
env: [{name: ANSIBLE_ERROR_ON_UNDEFINED_VARS}]
ini:
- {key: error_on_undefined_vars, section: defaults}
type: boolean
DEFAULT_VARS_PLUGIN_PATH:
name: Vars Plugins Path
default: ~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars
description: Colon separated paths in which Ansible will search for Vars Plugins.
env: [{name: ANSIBLE_VARS_PLUGINS}]
ini:
- {key: vars_plugins, section: defaults}
type: pathspec
# TODO: unused?
#DEFAULT_VAR_COMPRESSION_LEVEL:
# default: 0
# description: 'TODO: write it'
# env: [{name: ANSIBLE_VAR_COMPRESSION_LEVEL}]
# ini:
# - {key: var_compression_level, section: defaults}
# type: integer
# yaml: {key: defaults.var_compression_level}
DEFAULT_VAULT_ID_MATCH:
name: Force vault id match
default: False
description: 'If true, decrypting vaults with a vault id will only try the password from the matching vault-id'
env: [{name: ANSIBLE_VAULT_ID_MATCH}]
ini:
- {key: vault_id_match, section: defaults}
yaml: {key: defaults.vault_id_match}
DEFAULT_VAULT_IDENTITY:
name: Vault id label
default: default
description: 'The label to use for the default vault id label in cases where a vault id label is not provided'
env: [{name: ANSIBLE_VAULT_IDENTITY}]
ini:
- {key: vault_identity, section: defaults}
yaml: {key: defaults.vault_identity}
DEFAULT_VAULT_ENCRYPT_IDENTITY:
name: Vault id to use for encryption
description: 'The vault_id to use for encrypting by default. If multiple vault_ids are provided, this specifies which to use for encryption. The --encrypt-vault-id cli option overrides the configured value.'
env: [{name: ANSIBLE_VAULT_ENCRYPT_IDENTITY}]
ini:
- {key: vault_encrypt_identity, section: defaults}
yaml: {key: defaults.vault_encrypt_identity}
DEFAULT_VAULT_IDENTITY_LIST:
name: Default vault ids
default: []
description: 'A list of vault-ids to use by default. Equivalent to multiple --vault-id args. Vault-ids are tried in order.'
env: [{name: ANSIBLE_VAULT_IDENTITY_LIST}]
ini:
- {key: vault_identity_list, section: defaults}
type: list
yaml: {key: defaults.vault_identity_list}
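  # A minimal sketch of how this might look in ansible.cfg, using the
  # 'label@source' form (the labels and paths below are hypothetical):
  # [defaults]
  # vault_identity_list = dev@~/.vault_pass_dev.txt, prod@~/.vault_pass_prod.txt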
DEFAULT_VAULT_PASSWORD_FILE:
name: Vault password file
default: ~
description:
- 'The vault password file to use. Equivalent to --vault-password-file or --vault-id'
- If executable, it will be run and the resulting stdout will be used as the password.
env: [{name: ANSIBLE_VAULT_PASSWORD_FILE}]
ini:
- {key: vault_password_file, section: defaults}
type: path
yaml: {key: defaults.vault_password_file}
DEFAULT_VERBOSITY:
name: Verbosity
default: 0
description: Sets the default verbosity, equivalent to the number of ``-v`` passed in the command line.
env: [{name: ANSIBLE_VERBOSITY}]
ini:
- {key: verbosity, section: defaults}
type: integer
DEPRECATION_WARNINGS:
name: Deprecation messages
default: True
description: "Toggle to control the showing of deprecation warnings"
env: [{name: ANSIBLE_DEPRECATION_WARNINGS}]
ini:
- {key: deprecation_warnings, section: defaults}
type: boolean
DEVEL_WARNING:
name: Running devel warning
default: True
description: Toggle to control showing warnings related to running devel
env: [{name: ANSIBLE_DEVEL_WARNING}]
ini:
- {key: devel_warning, section: defaults}
type: boolean
DIFF_ALWAYS:
name: Show differences
default: False
description: Configuration toggle to tell modules to show differences when in 'changed' status, equivalent to ``--diff``.
env: [{name: ANSIBLE_DIFF_ALWAYS}]
ini:
- {key: always, section: diff}
type: bool
DIFF_CONTEXT:
name: Difference context
default: 3
description: How many lines of context to show when displaying the differences between files.
env: [{name: ANSIBLE_DIFF_CONTEXT}]
ini:
- {key: context, section: diff}
type: integer
DISPLAY_ARGS_TO_STDOUT:
name: Show task arguments
default: False
description:
- "Normally ``ansible-playbook`` will print a header for each task that is run.
These headers will contain the name: field from the task if you specified one.
If you didn't then ``ansible-playbook`` uses the task's action to help you tell which task is presently running.
Sometimes you run many of the same action and so you want more information about the task to differentiate it from others of the same action.
If you set this variable to True in the config then ``ansible-playbook`` will also include the task's arguments in the header."
- "This setting defaults to False because there is a chance that you have sensitive values in your parameters and
you do not want those to be printed."
- "If you set this to True you should be sure that you have secured your environment's stdout
(no one can shoulder surf your screen and you aren't saving stdout to an insecure file) or
made sure that all of your playbooks explicitly added the ``no_log: True`` parameter to tasks which have sensitive values
See How do I keep secret data in my playbook? for more information."
env: [{name: ANSIBLE_DISPLAY_ARGS_TO_STDOUT}]
ini:
- {key: display_args_to_stdout, section: defaults}
type: boolean
version_added: "2.1"
DISPLAY_SKIPPED_HOSTS:
name: Show skipped results
default: True
description: "Toggle to control displaying skipped task/host entries in a task in the default callback"
env:
- name: DISPLAY_SKIPPED_HOSTS
deprecated:
why: environment variables without ``ANSIBLE_`` prefix are deprecated
version: "2.12"
alternatives: the ``ANSIBLE_DISPLAY_SKIPPED_HOSTS`` environment variable
- name: ANSIBLE_DISPLAY_SKIPPED_HOSTS
ini:
- {key: display_skipped_hosts, section: defaults}
type: boolean
DOCSITE_ROOT_URL:
name: Root docsite URL
default: https://docs.ansible.com/ansible-core/
description: Root docsite URL used to generate docs URLs in warning/error text;
must be an absolute URL with valid scheme and trailing slash.
ini:
- {key: docsite_root_url, section: defaults}
version_added: "2.8"
DUPLICATE_YAML_DICT_KEY:
name: Controls ansible behaviour when finding duplicate keys in YAML.
default: warn
description:
- By default Ansible will issue a warning when a duplicate dict key is encountered in YAML.
    - These warnings can be silenced by adjusting this setting to 'ignore'.
env: [{name: ANSIBLE_DUPLICATE_YAML_DICT_KEY}]
ini:
- {key: duplicate_dict_key, section: defaults}
type: string
choices: ['warn', 'error', 'ignore']
version_added: "2.9"
ERROR_ON_MISSING_HANDLER:
name: Missing handler error
default: True
description: "Toggle to allow missing handlers to become a warning instead of an error when notifying."
env: [{name: ANSIBLE_ERROR_ON_MISSING_HANDLER}]
ini:
- {key: error_on_missing_handler, section: defaults}
type: boolean
CONNECTION_FACTS_MODULES:
name: Map of connections to fact modules
default:
# use ansible.legacy names on unqualified facts modules to allow library/ overrides
asa: ansible.legacy.asa_facts
cisco.asa.asa: cisco.asa.asa_facts
eos: ansible.legacy.eos_facts
arista.eos.eos: arista.eos.eos_facts
frr: ansible.legacy.frr_facts
frr.frr.frr: frr.frr.frr_facts
ios: ansible.legacy.ios_facts
cisco.ios.ios: cisco.ios.ios_facts
iosxr: ansible.legacy.iosxr_facts
cisco.iosxr.iosxr: cisco.iosxr.iosxr_facts
junos: ansible.legacy.junos_facts
junipernetworks.junos.junos: junipernetworks.junos.junos_facts
nxos: ansible.legacy.nxos_facts
cisco.nxos.nxos: cisco.nxos.nxos_facts
vyos: ansible.legacy.vyos_facts
vyos.vyos.vyos: vyos.vyos.vyos_facts
exos: ansible.legacy.exos_facts
extreme.exos.exos: extreme.exos.exos_facts
slxos: ansible.legacy.slxos_facts
extreme.slxos.slxos: extreme.slxos.slxos_facts
voss: ansible.legacy.voss_facts
extreme.voss.voss: extreme.voss.voss_facts
ironware: ansible.legacy.ironware_facts
community.network.ironware: community.network.ironware_facts
description: "Which modules to run during a play's fact gathering stage based on connection"
type: dict
FACTS_MODULES:
name: Gather Facts Modules
default:
- smart
description: "Which modules to run during a play's fact gathering stage, using the default of 'smart' will try to figure it out based on connection type."
env: [{name: ANSIBLE_FACTS_MODULES}]
ini:
- {key: facts_modules, section: defaults}
type: list
vars:
- name: ansible_facts_modules
GALAXY_IGNORE_CERTS:
name: Galaxy validate certs
default: False
description:
- If set to yes, ansible-galaxy will not validate TLS certificates.
This can be useful for testing against a server with a self-signed certificate.
env: [{name: ANSIBLE_GALAXY_IGNORE}]
ini:
- {key: ignore_certs, section: galaxy}
type: boolean
GALAXY_ROLE_SKELETON:
name: Galaxy role skeleton directory
description: Role skeleton directory to use as a template for the ``init`` action in ``ansible-galaxy``/``ansible-galaxy role``, same as ``--role-skeleton``.
env: [{name: ANSIBLE_GALAXY_ROLE_SKELETON}]
ini:
- {key: role_skeleton, section: galaxy}
type: path
GALAXY_ROLE_SKELETON_IGNORE:
name: Galaxy role skeleton ignore
default: ["^.git$", "^.*/.git_keep$"]
description: patterns of files to ignore inside a Galaxy role or collection skeleton directory
env: [{name: ANSIBLE_GALAXY_ROLE_SKELETON_IGNORE}]
ini:
- {key: role_skeleton_ignore, section: galaxy}
type: list
GALAXY_COLLECTION_SKELETON:
name: Galaxy collection skeleton directory
description: Collection skeleton directory to use as a template for the ``init`` action in ``ansible-galaxy collection``, same as ``--collection-skeleton``.
env: [{name: ANSIBLE_GALAXY_COLLECTION_SKELETON}]
ini:
- {key: collection_skeleton, section: galaxy}
type: path
GALAXY_COLLECTION_SKELETON_IGNORE:
name: Galaxy collection skeleton ignore
default: ["^.git$", "^.*/.git_keep$"]
description: patterns of files to ignore inside a Galaxy collection skeleton directory
env: [{name: ANSIBLE_GALAXY_COLLECTION_SKELETON_IGNORE}]
ini:
- {key: collection_skeleton_ignore, section: galaxy}
type: list
# TODO: unused?
#GALAXY_SCMS:
# name: Galaxy SCMS
# default: git, hg
# description: Available galaxy source control management systems.
# env: [{name: ANSIBLE_GALAXY_SCMS}]
# ini:
# - {key: scms, section: galaxy}
# type: list
GALAXY_SERVER:
default: https://galaxy.ansible.com
description: "URL to prepend when roles don't specify the full URI, assume they are referencing this server as the source."
env: [{name: ANSIBLE_GALAXY_SERVER}]
ini:
- {key: server, section: galaxy}
yaml: {key: galaxy.server}
GALAXY_SERVER_LIST:
description:
- A list of Galaxy servers to use when installing a collection.
- The value corresponds to the config ini header ``[galaxy_server.{{item}}]`` which defines the server details.
- 'See :ref:`galaxy_server_config` for more details on how to define a Galaxy server.'
    - The order of servers in this list is used as the order in which a collection is resolved.
- Setting this config option will ignore the :ref:`galaxy_server` config option.
env: [{name: ANSIBLE_GALAXY_SERVER_LIST}]
ini:
- {key: server_list, section: galaxy}
type: list
version_added: "2.9"
GALAXY_TOKEN_PATH:
default: ~/.ansible/galaxy_token
description: "Local path to galaxy access token file"
env: [{name: ANSIBLE_GALAXY_TOKEN_PATH}]
ini:
- {key: token_path, section: galaxy}
type: path
version_added: "2.9"
GALAXY_DISPLAY_PROGRESS:
default: ~
description:
- Some steps in ``ansible-galaxy`` display a progress wheel which can cause issues on certain displays or when
      outputting the stdout to a file.
- This config option controls whether the display wheel is shown or not.
- The default is to show the display wheel if stdout has a tty.
env: [{name: ANSIBLE_GALAXY_DISPLAY_PROGRESS}]
ini:
- {key: display_progress, section: galaxy}
type: bool
version_added: "2.10"
GALAXY_CACHE_DIR:
default: ~/.ansible/galaxy_cache
description:
- The directory that stores cached responses from a Galaxy server.
- This is only used by the ``ansible-galaxy collection install`` and ``download`` commands.
- Cache files inside this dir will be ignored if they are world writable.
env:
- name: ANSIBLE_GALAXY_CACHE_DIR
ini:
- section: galaxy
key: cache_dir
type: path
version_added: '2.11'
HOST_KEY_CHECKING:
# note: constant not in use by ssh plugin anymore
# TODO: check non ssh connection plugins for use/migration
name: Check host keys
default: True
description: 'Set this to "False" if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host'
env: [{name: ANSIBLE_HOST_KEY_CHECKING}]
ini:
- {key: host_key_checking, section: defaults}
type: boolean
HOST_PATTERN_MISMATCH:
name: Control host pattern mismatch behaviour
default: 'warning'
  description: This setting changes the behaviour of mismatched host patterns; it allows you to force a fatal error, a warning, or to just ignore it.
env: [{name: ANSIBLE_HOST_PATTERN_MISMATCH}]
ini:
- {key: host_pattern_mismatch, section: inventory}
choices: ['warning', 'error', 'ignore']
version_added: "2.8"
INTERPRETER_PYTHON:
name: Python interpreter path (or automatic discovery behavior) used for module execution
default: auto
env: [{name: ANSIBLE_PYTHON_INTERPRETER}]
ini:
- {key: interpreter_python, section: defaults}
vars:
- {name: ansible_python_interpreter}
version_added: "2.8"
description:
- Path to the Python interpreter to be used for module execution on remote targets, or an automatic discovery mode.
Supported discovery modes are ``auto`` (the default), ``auto_silent``, ``auto_legacy``, and ``auto_legacy_silent``.
All discovery modes employ a lookup table to use the included system Python (on distributions known to include one),
falling back to a fixed ordered list of well-known Python interpreter locations if a platform-specific default is not
available. The fallback behavior will issue a warning that the interpreter should be set explicitly (since interpreters
installed later may change which one is used). This warning behavior can be disabled by setting ``auto_silent`` or
``auto_legacy_silent``. The value of ``auto_legacy`` provides all the same behavior, but for backwards-compatibility
with older Ansible releases that always defaulted to ``/usr/bin/python``, will use that interpreter if present.
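  # A short sketch of pinning the interpreter per host through the inventory
  # variable listed above (host name and path are hypothetical):
  # [db]
  # db1.example.com ansible_python_interpreter=/usr/local/bin/python3.9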
INTERPRETER_PYTHON_DISTRO_MAP:
name: Mapping of known included platform pythons for various Linux distros
default:
centos: &rhelish
'6': /usr/bin/python
'8': /usr/libexec/platform-python
'9': /usr/bin/python3
debian:
'8': /usr/bin/python
'10': /usr/bin/python3
fedora:
'23': /usr/bin/python3
oracle: *rhelish
redhat: *rhelish
rhel: *rhelish
ubuntu:
'14': /usr/bin/python
'16': /usr/bin/python3
version_added: "2.8"
# FUTURE: add inventory override once we're sure it can't be abused by a rogue target
# FUTURE: add a platform layer to the map so we could use for, eg, freebsd/macos/etc?
INTERPRETER_PYTHON_FALLBACK:
name: Ordered list of Python interpreters to check for in discovery
default:
- python3.10
- python3.9
- python3.8
- python3.7
- python3.6
- python3.5
- /usr/bin/python3
- /usr/libexec/platform-python
- python2.7
- python2.6
- /usr/bin/python
- python
vars:
- name: ansible_interpreter_python_fallback
type: list
version_added: "2.8"
TRANSFORM_INVALID_GROUP_CHARS:
name: Transform invalid characters in group names
default: 'never'
description:
    - Make ansible transform invalid characters in group names supplied by inventory sources.
    - If 'never', it will allow for the group name but warn about the issue.
    - When 'ignore', it does the same as 'never', without issuing a warning.
    - When 'always', it will replace any invalid characters with '_' (underscore) and warn the user.
    - When 'silently', it does the same as 'always', without issuing a warning.
env: [{name: ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS}]
ini:
- {key: force_valid_group_names, section: defaults}
type: string
choices: ['always', 'never', 'ignore', 'silently']
version_added: '2.8'
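  # Illustrative example: with 'always', an inventory group named 'web-servers'
  # would be rewritten to 'web_servers' (with a warning); 'silently' performs
  # the same rewrite without the warning.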
INVALID_TASK_ATTRIBUTE_FAILED:
name: Controls whether invalid attributes for a task result in errors instead of warnings
default: True
description: If 'false', invalid attributes for a task will result in warnings instead of errors
type: boolean
env:
- name: ANSIBLE_INVALID_TASK_ATTRIBUTE_FAILED
ini:
- key: invalid_task_attribute_failed
section: defaults
version_added: "2.7"
INVENTORY_ANY_UNPARSED_IS_FAILED:
name: Controls whether any unparseable inventory source is a fatal error
default: False
description: >
If 'true', it is a fatal error when any given inventory source
cannot be successfully parsed by any available inventory plugin;
otherwise, this situation only attracts a warning.
type: boolean
env: [{name: ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED}]
ini:
- {key: any_unparsed_is_failed, section: inventory}
version_added: "2.7"
INVENTORY_CACHE_ENABLED:
name: Inventory caching enabled
default: False
description:
- Toggle to turn on inventory caching.
- This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
- The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory configuration.
- This message will be removed in 2.16.
env: [{name: ANSIBLE_INVENTORY_CACHE}]
ini:
- {key: cache, section: inventory}
type: bool
INVENTORY_CACHE_PLUGIN:
name: Inventory cache plugin
description:
- The plugin for caching inventory.
- This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
- The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
- This message will be removed in 2.16.
env: [{name: ANSIBLE_INVENTORY_CACHE_PLUGIN}]
ini:
- {key: cache_plugin, section: inventory}
INVENTORY_CACHE_PLUGIN_CONNECTION:
name: Inventory cache plugin URI to override the defaults section
description:
- The inventory cache connection.
- This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
- The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
- This message will be removed in 2.16.
env: [{name: ANSIBLE_INVENTORY_CACHE_CONNECTION}]
ini:
- {key: cache_connection, section: inventory}
INVENTORY_CACHE_PLUGIN_PREFIX:
name: Inventory cache plugin table prefix
description:
- The table prefix for the cache plugin.
- This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
- The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
- This message will be removed in 2.16.
env: [{name: ANSIBLE_INVENTORY_CACHE_PLUGIN_PREFIX}]
default: ansible_inventory_
ini:
- {key: cache_prefix, section: inventory}
INVENTORY_CACHE_TIMEOUT:
name: Inventory cache plugin expiration timeout
description:
- Expiration timeout for the inventory cache plugin data.
- This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
- The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
- This message will be removed in 2.16.
default: 3600
env: [{name: ANSIBLE_INVENTORY_CACHE_TIMEOUT}]
ini:
- {key: cache_timeout, section: inventory}
INVENTORY_ENABLED:
name: Active Inventory plugins
default: ['host_list', 'script', 'auto', 'yaml', 'ini', 'toml']
description: List of enabled inventory plugins, it also determines the order in which they are used.
env: [{name: ANSIBLE_INVENTORY_ENABLED}]
ini:
- {key: enable_plugins, section: inventory}
type: list
INVENTORY_EXPORT:
name: Set ansible-inventory into export mode
default: False
  description: Controls if ansible-inventory will accurately reflect Ansible's view into inventory or if it's optimized for exporting.
env: [{name: ANSIBLE_INVENTORY_EXPORT}]
ini:
- {key: export, section: inventory}
type: bool
INVENTORY_IGNORE_EXTS:
name: Inventory ignore extensions
default: "{{(REJECT_EXTS + ('.orig', '.ini', '.cfg', '.retry'))}}"
description: List of extensions to ignore when using a directory as an inventory source
env: [{name: ANSIBLE_INVENTORY_IGNORE}]
ini:
- {key: inventory_ignore_extensions, section: defaults}
- {key: ignore_extensions, section: inventory}
type: list
INVENTORY_IGNORE_PATTERNS:
name: Inventory ignore patterns
default: []
description: List of patterns to ignore when using a directory as an inventory source
env: [{name: ANSIBLE_INVENTORY_IGNORE_REGEX}]
ini:
- {key: inventory_ignore_patterns, section: defaults}
- {key: ignore_patterns, section: inventory}
type: list
INVENTORY_UNPARSED_IS_FAILED:
name: Unparsed Inventory failure
default: False
description: >
If 'true' it is a fatal error if every single potential inventory
source fails to parse, otherwise this situation will only attract a
warning.
env: [{name: ANSIBLE_INVENTORY_UNPARSED_FAILED}]
ini:
- {key: unparsed_is_failed, section: inventory}
type: bool
JINJA2_NATIVE_WARNING:
name: Running older than required Jinja version for jinja2_native warning
default: True
description: Toggle to control showing warnings related to running a Jinja version
older than required for jinja2_native
env:
- name: ANSIBLE_JINJA2_NATIVE_WARNING
deprecated:
why: This option is no longer used in the Ansible Core code base.
version: "2.17"
ini:
- {key: jinja2_native_warning, section: defaults}
type: boolean
MAX_FILE_SIZE_FOR_DIFF:
name: Diff maximum file size
default: 104448
description: Maximum size of files to be considered for diff display
env: [{name: ANSIBLE_MAX_DIFF_SIZE}]
ini:
- {key: max_diff_size, section: defaults}
type: int
NETWORK_GROUP_MODULES:
name: Network module families
default: [eos, nxos, ios, iosxr, junos, enos, ce, vyos, sros, dellos9, dellos10, dellos6, asa, aruba, aireos, bigip, ironware, onyx, netconf, exos, voss, slxos]
description: 'TODO: write it'
env:
- name: NETWORK_GROUP_MODULES
deprecated:
why: environment variables without ``ANSIBLE_`` prefix are deprecated
version: "2.12"
alternatives: the ``ANSIBLE_NETWORK_GROUP_MODULES`` environment variable
- name: ANSIBLE_NETWORK_GROUP_MODULES
ini:
- {key: network_group_modules, section: defaults}
type: list
yaml: {key: defaults.network_group_modules}
INJECT_FACTS_AS_VARS:
default: True
description:
- Facts are available inside the `ansible_facts` variable, this setting also pushes them as their own vars in the main namespace.
- Unlike inside the `ansible_facts` dictionary, these will have an `ansible_` prefix.
env: [{name: ANSIBLE_INJECT_FACT_VARS}]
ini:
- {key: inject_facts_as_vars, section: defaults}
type: boolean
version_added: "2.5"
MODULE_IGNORE_EXTS:
name: Module ignore extensions
default: "{{(REJECT_EXTS + ('.yaml', '.yml', '.ini'))}}"
description:
- List of extensions to ignore when looking for modules to load
- This is for rejecting script and binary module fallback extensions
env: [{name: ANSIBLE_MODULE_IGNORE_EXTS}]
ini:
- {key: module_ignore_exts, section: defaults}
type: list
OLD_PLUGIN_CACHE_CLEARING:
  description: Previously Ansible would only clear some of the plugin loading caches when loading new roles; this led to some behaviours in which a plugin loaded in previous plays would be unexpectedly 'sticky'. This setting allows a return to that behaviour.
env: [{name: ANSIBLE_OLD_PLUGIN_CACHE_CLEAR}]
ini:
- {key: old_plugin_cache_clear, section: defaults}
type: boolean
default: False
version_added: "2.8"
PARAMIKO_HOST_KEY_AUTO_ADD:
# TODO: move to plugin
default: False
description: 'TODO: write it'
env: [{name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD}]
ini:
- {key: host_key_auto_add, section: paramiko_connection}
type: boolean
PARAMIKO_LOOK_FOR_KEYS:
name: look for keys
default: True
description: 'TODO: write it'
env: [{name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS}]
ini:
- {key: look_for_keys, section: paramiko_connection}
type: boolean
PERSISTENT_CONTROL_PATH_DIR:
name: Persistence socket path
default: ~/.ansible/pc
description: Path to socket to be used by the connection persistence system.
env: [{name: ANSIBLE_PERSISTENT_CONTROL_PATH_DIR}]
ini:
- {key: control_path_dir, section: persistent_connection}
type: path
PERSISTENT_CONNECT_TIMEOUT:
name: Persistence timeout
default: 30
description: This controls how long the persistent connection will remain idle before it is destroyed.
env: [{name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT}]
ini:
- {key: connect_timeout, section: persistent_connection}
type: integer
PERSISTENT_CONNECT_RETRY_TIMEOUT:
name: Persistence connection retry timeout
default: 15
  description: This controls the retry timeout for the persistent connection to connect to the local domain socket.
env: [{name: ANSIBLE_PERSISTENT_CONNECT_RETRY_TIMEOUT}]
ini:
- {key: connect_retry_timeout, section: persistent_connection}
type: integer
PERSISTENT_COMMAND_TIMEOUT:
name: Persistence command timeout
default: 30
  description: This controls the amount of time to wait for a response from the remote device before timing out the persistent connection.
env: [{name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT}]
ini:
- {key: command_timeout, section: persistent_connection}
type: int
PLAYBOOK_DIR:
name: playbook dir override for non-playbook CLIs (ala --playbook-dir)
version_added: "2.9"
description:
- A number of non-playbook CLIs have a ``--playbook-dir`` argument; this sets the default value for it.
env: [{name: ANSIBLE_PLAYBOOK_DIR}]
ini: [{key: playbook_dir, section: defaults}]
type: path
PLAYBOOK_VARS_ROOT:
name: playbook vars files root
default: top
version_added: "2.4.1"
description:
- This sets which playbook dirs will be used as a root to process vars plugins, which includes finding host_vars/group_vars
- The ``top`` option follows the traditional behaviour of using the top playbook in the chain to find the root directory.
- The ``bottom`` option follows the 2.4.0 behaviour of using the current playbook to find the root directory.
- The ``all`` option examines from the first parent to the current playbook.
env: [{name: ANSIBLE_PLAYBOOK_VARS_ROOT}]
ini:
- {key: playbook_vars_root, section: defaults}
choices: [ top, bottom, all ]
PLUGIN_FILTERS_CFG:
name: Config file for limiting valid plugins
default: null
version_added: "2.5.0"
description:
- "A path to configuration for filtering which plugins installed on the system are allowed to be used."
- "See :ref:`plugin_filtering_config` for details of the filter file's format."
- " The default is /etc/ansible/plugin_filters.yml"
ini:
- key: plugin_filters_cfg
section: default
deprecated:
why: specifying "plugin_filters_cfg" under the "default" section is deprecated
version: "2.12"
alternatives: the "defaults" section instead
- key: plugin_filters_cfg
section: defaults
type: path
PYTHON_MODULE_RLIMIT_NOFILE:
name: Adjust maximum file descriptor soft limit during Python module execution
description:
- Attempts to set RLIMIT_NOFILE soft limit to the specified value when executing Python modules (can speed up subprocess usage on
Python 2.x. See https://bugs.python.org/issue11284). The value will be limited by the existing hard limit. Default
value of 0 does not attempt to adjust existing system-defined limits.
default: 0
env:
- {name: ANSIBLE_PYTHON_MODULE_RLIMIT_NOFILE}
ini:
- {key: python_module_rlimit_nofile, section: defaults}
vars:
- {name: ansible_python_module_rlimit_nofile}
version_added: '2.8'
RETRY_FILES_ENABLED:
name: Retry files
default: False
description: This controls whether a failed Ansible playbook should create a .retry file.
env: [{name: ANSIBLE_RETRY_FILES_ENABLED}]
ini:
- {key: retry_files_enabled, section: defaults}
type: bool
RETRY_FILES_SAVE_PATH:
name: Retry files path
default: ~
description:
- This sets the path in which Ansible will save .retry files when a playbook fails and retry files are enabled.
- This file will be overwritten after each run with the list of failed hosts from all plays.
env: [{name: ANSIBLE_RETRY_FILES_SAVE_PATH}]
ini:
- {key: retry_files_save_path, section: defaults}
type: path
RUN_VARS_PLUGINS:
name: When should vars plugins run relative to inventory
default: demand
description:
    - This setting can be used to optimize vars_plugin usage depending on the user's inventory size and play selection.
- Setting to C(demand) will run vars_plugins relative to inventory sources anytime vars are 'demanded' by tasks.
- Setting to C(start) will run vars_plugins relative to inventory sources after importing that inventory source.
env: [{name: ANSIBLE_RUN_VARS_PLUGINS}]
ini:
- {key: run_vars_plugins, section: defaults}
type: str
choices: ['demand', 'start']
version_added: "2.10"
SHOW_CUSTOM_STATS:
name: Display custom stats
default: False
description: 'This adds the custom stats set via the set_stats plugin to the default output'
env: [{name: ANSIBLE_SHOW_CUSTOM_STATS}]
ini:
- {key: show_custom_stats, section: defaults}
type: bool
STRING_TYPE_FILTERS:
name: Filters to preserve strings
default: [string, to_json, to_nice_json, to_yaml, to_nice_yaml, ppretty, json]
description:
- "This list of filters avoids 'type conversion' when templating variables"
- Useful when you want to avoid conversion into lists or dictionaries for JSON strings, for example.
env: [{name: ANSIBLE_STRING_TYPE_FILTERS}]
ini:
- {key: dont_type_filters, section: jinja2}
type: list
SYSTEM_WARNINGS:
name: System warnings
default: True
description:
- Allows disabling of warnings related to potential issues on the system running ansible itself (not on the managed hosts)
- These may include warnings about 3rd party packages or other conditions that should be resolved if possible.
env: [{name: ANSIBLE_SYSTEM_WARNINGS}]
ini:
- {key: system_warnings, section: defaults}
type: boolean
TAGS_RUN:
name: Run Tags
default: []
type: list
  description: Default list of tags to run in your plays; Skip Tags has precedence.
env: [{name: ANSIBLE_RUN_TAGS}]
ini:
- {key: run, section: tags}
version_added: "2.5"
TAGS_SKIP:
name: Skip Tags
default: []
type: list
  description: Default list of tags to skip in your plays; has precedence over Run Tags.
env: [{name: ANSIBLE_SKIP_TAGS}]
ini:
- {key: skip, section: tags}
version_added: "2.5"
TASK_TIMEOUT:
name: Task Timeout
default: 0
description:
- Set the maximum time (in seconds) that a task can run for.
- If set to 0 (the default) there is no timeout.
env: [{name: ANSIBLE_TASK_TIMEOUT}]
ini:
- {key: task_timeout, section: defaults}
type: integer
version_added: '2.10'
WORKER_SHUTDOWN_POLL_COUNT:
name: Worker Shutdown Poll Count
default: 0
description:
- The maximum number of times to check Task Queue Manager worker processes to verify they have exited cleanly.
- After this limit is reached any worker processes still running will be terminated.
- This is for internal use only.
env: [{name: ANSIBLE_WORKER_SHUTDOWN_POLL_COUNT}]
type: integer
version_added: '2.10'
WORKER_SHUTDOWN_POLL_DELAY:
name: Worker Shutdown Poll Delay
default: 0.1
description:
- The number of seconds to sleep between polling loops when checking Task Queue Manager worker processes to verify they have exited cleanly.
- This is for internal use only.
env: [{name: ANSIBLE_WORKER_SHUTDOWN_POLL_DELAY}]
type: float
version_added: '2.10'
USE_PERSISTENT_CONNECTIONS:
name: Persistence
default: False
description: Toggles the use of persistence for connections.
env: [{name: ANSIBLE_USE_PERSISTENT_CONNECTIONS}]
ini:
- {key: use_persistent_connections, section: defaults}
type: boolean
VARIABLE_PLUGINS_ENABLED:
name: Vars plugin enabled list
default: ['host_group_vars']
description: Whitelist for variable plugins that require it.
env: [{name: ANSIBLE_VARS_ENABLED}]
ini:
- {key: vars_plugins_enabled, section: defaults}
type: list
version_added: "2.10"
VARIABLE_PRECEDENCE:
name: Group variable precedence
default: ['all_inventory', 'groups_inventory', 'all_plugins_inventory', 'all_plugins_play', 'groups_plugins_inventory', 'groups_plugins_play']
  description: Allows changing the group variable precedence merge order.
env: [{name: ANSIBLE_PRECEDENCE}]
ini:
- {key: precedence, section: defaults}
type: list
version_added: "2.4"
WIN_ASYNC_STARTUP_TIMEOUT:
name: Windows Async Startup Timeout
default: 5
description:
- For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling),
this is how long, in seconds, to wait for the task spawned by Ansible to connect back to the named pipe used
on Windows systems. The default is 5 seconds. This can be too low on slower systems, or systems under heavy load.
- This is not the total time an async command can run for, but is a separate timeout to wait for an async command to
start. The task will only start to be timed against its async_timeout once it has connected to the pipe, so the
overall maximum duration the task can take will be extended by the amount specified here.
env: [{name: ANSIBLE_WIN_ASYNC_STARTUP_TIMEOUT}]
ini:
- {key: win_async_startup_timeout, section: defaults}
type: integer
vars:
- {name: ansible_win_async_startup_timeout}
version_added: '2.10'
YAML_FILENAME_EXTENSIONS:
name: Valid YAML extensions
default: [".yml", ".yaml", ".json"]
description:
- "Check all of these extensions when looking for 'variable' files which should be YAML or JSON or vaulted versions of these."
- 'This affects vars_files, include_vars, inventory and vars plugins among others.'
env:
- name: ANSIBLE_YAML_FILENAME_EXT
ini:
- section: defaults
key: yaml_valid_extensions
type: list
NETCONF_SSH_CONFIG:
  description: This variable is used to enable a bastion/jump host with the netconf connection. If set to True, the bastion/jump
    host ssh settings should be present in the ~/.ssh/config file; alternatively, it can be set
    to a custom ssh configuration file path from which to read the bastion/jump host settings.
env: [{name: ANSIBLE_NETCONF_SSH_CONFIG}]
ini:
- {key: ssh_config, section: netconf_connection}
yaml: {key: netconf_connection.ssh_config}
default: null
STRING_CONVERSION_ACTION:
version_added: '2.8'
description:
- Action to take when a module parameter value is converted to a string (this does not affect variables).
For string parameters, values such as '1.00', "['a', 'b',]", and 'yes', 'y', etc.
will be converted by the YAML parser unless fully quoted.
- Valid options are 'error', 'warn', and 'ignore'.
- Since 2.8, this option defaults to 'warn' but will change to 'error' in 2.12.
default: 'warn'
env:
- name: ANSIBLE_STRING_CONVERSION_ACTION
ini:
- section: defaults
key: string_conversion_action
type: string
VALIDATE_ACTION_GROUP_METADATA:
version_added: '2.12'
description:
- A toggle to disable validating a collection's 'metadata' entry for a module_defaults action group.
Metadata containing unexpected fields or value types will produce a warning when this is True.
default: True
env: [{name: ANSIBLE_VALIDATE_ACTION_GROUP_METADATA}]
ini:
- section: defaults
key: validate_action_group_metadata
type: bool
VERBOSE_TO_STDERR:
version_added: '2.8'
description:
- Force 'verbose' option to use stderr instead of stdout
default: False
env:
- name: ANSIBLE_VERBOSE_TO_STDERR
ini:
- section: defaults
key: verbose_to_stderr
type: bool
...
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 72,263 |
fact_path in ansible.cfg has no effect on ad-hoc setup or setup invoked as task
|
##### SUMMARY
When setting `fact_path` in `ansible.cfg`, the configured path is used only for `gather_facts`; it has no effect when using `setup` as a task or when running `ansible -m setup` ad-hoc. This behavior is in fact [documented](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/setup_module.html#):
> The default fact_path can be specified in ansible.cfg for when setup is automatically called as part of gather_facts

This is particularly painful when the controller is running, say, FreeBSD, as they package Ansible with a default of `/usr/local/etc/ansible/facts.d`, which differs greatly from Ansible's typical default of `/etc/ansible/facts.d`. Note that I do not think this is a FreeBSD packaging issue.
##### ISSUE TYPE
- Feature Request
##### COMPONENT NAME
setup
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes -->
```
ansible 2.10.2
config file = /Users/jpm/.ansible.cfg
configured module search path = ['/etc/ansible/library']
ansible python module location = /private/tmp/an/env.v3/lib/python3.8/site-packages/ansible
executable location = /private/tmp/an/env.v3/bin/ansible
python version = 3.8.5 (default, Sep 27 2020, 11:38:54) [Clang 12.0.0 (clang-1200.0.32.2)]
```
##### CONFIGURATION
<!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes -->
```
ANSIBLE_NOCOWS(/Users/jpm/.ansible.cfg) = True
ANSIBLE_SSH_ARGS(/Users/jpm/.ansible.cfg) = -o PasswordAuthentication=no -o ControlMaster=auto -o ControlPersist=60s -o ControlPath=/tmp/ansible-ssh-%h-%p-%r -o PreferredAuthentications=publickey,gssapi-with-mic,gssapi-keyex,hostbased
DEFAULT_ACTION_PLUGIN_PATH(/Users/jpm/.ansible.cfg) = ['/etc/ansible/jp.action_plugins']
DEFAULT_FACT_PATH(/Users/jpm/.ansible.cfg) = /tmp/jp/facts.d
DEFAULT_FILTER_PLUGIN_PATH(/Users/jpm/.ansible.cfg) = ['/etc/ansible/jp.filter_plugins']
DEFAULT_HOST_LIST(/Users/jpm/.ansible.cfg) = ['/etc/ansible/hosts']
DEFAULT_LOAD_CALLBACK_PLUGINS(/Users/jpm/.ansible.cfg) = True
DEFAULT_MANAGED_STR(/Users/jpm/.ansible.cfg) = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
DEFAULT_MODULE_PATH(/Users/jpm/.ansible.cfg) = ['/etc/ansible/library']
RETRY_FILES_ENABLED(/Users/jpm/.ansible.cfg) = False
```
##### OS / ENVIRONMENT
Linux, FreeBSD, MacOSX
##### STEPS TO REPRODUCE
1. Create a new directory (`/tmp/jp/facts.d`) and a fact file within it
2. Run playbook
3. Run ad-hoc `setup`
```yaml
- hosts: 127.0.0.1
connection: local
gather_facts: true
tasks:
- debug: msg='{{ ansible_local.hw.name }}'
```
##### EXPECTED RESULTS
I expect the ad-hoc `setup` to produce the same values as the setup invocation via `gather_facts`
##### ACTUAL RESULTS
```console
$ cat /tmp/jp/facts.d/hw.fact
{ "name" : "Jane Jolie" }
$ ansible-playbook test.yml
PLAY [127.0.0.1] ********************************************************************
TASK [Gathering Facts] **************************************************************
ok: [localhost]
TASK [debug] ************************************************************************
ok: [localhost] => {
"msg": "Jane Jolie"
}
```
This does not work as expected:
```console
$ ansible 127.0.0.1 -c local -m setup -a filter=ansible_local
localhost | SUCCESS => {
"ansible_facts": {
"ansible_local": {}
},
"changed": false
}
```
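As a possible workaround (not a fix for the config handling itself), `fact_path` can be passed explicitly as a module argument, since the `setup` module accepts it directly. Untested here, but something like:
```console
$ ansible 127.0.0.1 -c local -m setup -a 'fact_path=/tmp/jp/facts.d filter=ansible_local'
```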
|
https://github.com/ansible/ansible/issues/72263
|
https://github.com/ansible/ansible/pull/76053
|
c53e6d94e987c27f60bed55a2dc8001ffcd10cd2
|
0b6d3312dd4dcc7a53b9027e07723cd5752a65ea
| 2020-10-20T12:04:47Z |
python
| 2022-01-10T22:25:00Z |
lib/ansible/executor/play_iterator.py
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
from enum import IntEnum, IntFlag
from ansible import constants as C
from ansible.errors import AnsibleAssertionError
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.playbook.block import Block
from ansible.playbook.task import Task
from ansible.utils.display import Display
display = Display()
__all__ = ['PlayIterator', 'IteratingStates', 'FailedStates']
class IteratingStates(IntEnum):
SETUP = 0
TASKS = 1
RESCUE = 2
ALWAYS = 3
COMPLETE = 4
class FailedStates(IntFlag):
NONE = 0
SETUP = 1
TASKS = 2
RESCUE = 4
ALWAYS = 8
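# Because FailedStates is an IntFlag, independent failure sources combine with
# bitwise operators instead of overwriting each other. Illustrative only:
#
#   state.fail_state |= FailedStates.TASKS       # record a task failure
#   if state.fail_state & FailedStates.RESCUE:   # did the rescue section fail?
#       ...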
class HostState:
def __init__(self, blocks):
self._blocks = blocks[:]
self.cur_block = 0
self.cur_regular_task = 0
self.cur_rescue_task = 0
self.cur_always_task = 0
self.run_state = IteratingStates.SETUP
self.fail_state = FailedStates.NONE
self.pending_setup = False
self.tasks_child_state = None
self.rescue_child_state = None
self.always_child_state = None
self.did_rescue = False
self.did_start_at_task = False
def __repr__(self):
return "HostState(%r)" % self._blocks
def __str__(self):
return ("HOST STATE: block=%d, task=%d, rescue=%d, always=%d, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? (%s), "
"rescue child state? (%s), always child state? (%s), did rescue? %s, did start at task? %s" % (
self.cur_block,
self.cur_regular_task,
self.cur_rescue_task,
self.cur_always_task,
self.run_state,
self.fail_state,
self.pending_setup,
self.tasks_child_state,
self.rescue_child_state,
self.always_child_state,
self.did_rescue,
self.did_start_at_task,
))
def __eq__(self, other):
if not isinstance(other, HostState):
return False
for attr in ('_blocks', 'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task',
'run_state', 'fail_state', 'pending_setup',
'tasks_child_state', 'rescue_child_state', 'always_child_state'):
if getattr(self, attr) != getattr(other, attr):
return False
return True
def get_current_block(self):
return self._blocks[self.cur_block]
def copy(self):
new_state = HostState(self._blocks)
new_state.cur_block = self.cur_block
new_state.cur_regular_task = self.cur_regular_task
new_state.cur_rescue_task = self.cur_rescue_task
new_state.cur_always_task = self.cur_always_task
new_state.run_state = self.run_state
new_state.fail_state = self.fail_state
new_state.pending_setup = self.pending_setup
new_state.did_rescue = self.did_rescue
new_state.did_start_at_task = self.did_start_at_task
if self.tasks_child_state is not None:
new_state.tasks_child_state = self.tasks_child_state.copy()
if self.rescue_child_state is not None:
new_state.rescue_child_state = self.rescue_child_state.copy()
if self.always_child_state is not None:
new_state.always_child_state = self.always_child_state.copy()
return new_state
def _redirect_to_enum(name):
if name.startswith('ITERATING_'):
rv = getattr(IteratingStates, name.replace('ITERATING_', ''))
display.deprecated(
f"PlayIterator.{name} is deprecated, use ansible.play_iterator.IteratingStates.{name} instead.",
            version="2.14"
)
return rv
elif name.startswith('FAILED_'):
rv = getattr(FailedStates, name.replace('FAILED_', ''))
display.deprecated(
f"PlayIterator.{name} is deprecated, use ansible.play_iterator.FailedStates.{name} instead.",
            version="2.14"
)
return rv
raise AttributeError(name)
class MetaPlayIterator(type):
"""Meta class to intercept calls to old *class* attributes
like PlayIterator.ITERATING_TASKS and use new enums instead.
This is for backwards compatibility as 3rd party strategies might
use those attributes. Deprecation warning is printed when old attr
is redirected to new enum.
"""
def __getattribute__(cls, name):
try:
rv = _redirect_to_enum(name)
except AttributeError:
return super().__getattribute__(name)
return rv
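# Illustrative only: with the metaclass above (and PlayIterator.__getattr__
# below), legacy third-party code keeps working and emits a deprecation
# warning instead of raising AttributeError, e.g.:
#
#   PlayIterator.ITERATING_TASKS     # resolves to IteratingStates.TASKS
#   some_iterator.FAILED_SETUP       # resolves to FailedStates.SETUP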
class PlayIterator(metaclass=MetaPlayIterator):
def __getattr__(self, name):
"""Same as MetaPlayIterator.__getattribute__ but for instance attributes,
because our code used iterator_object.ITERATING_TASKS so it's safe to assume
that 3rd party code could use that too.
__getattr__ is called when the default attribute access fails so this
should not impact existing attributes lookup.
"""
return _redirect_to_enum(name)
def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
self._play = play
self._blocks = []
self._variable_manager = variable_manager
# Default options to gather
gather_subset = self._play.gather_subset
gather_timeout = self._play.gather_timeout
fact_path = self._play.fact_path
setup_block = Block(play=self._play)
# Gathering facts with run_once would copy the facts from one host to
# the others.
setup_block.run_once = False
setup_task = Task(block=setup_block)
setup_task.action = 'gather_facts'
setup_task.name = 'Gathering Facts'
setup_task.args = {
'gather_subset': gather_subset,
}
# Unless play is specifically tagged, gathering should 'always' run
if not self._play.tags:
setup_task.tags = ['always']
if gather_timeout:
setup_task.args['gather_timeout'] = gather_timeout
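        # Note: the play's fact_path (which may come from the DEFAULT_FACT_PATH
        # config) is only injected into this implicit 'Gathering Facts' task;
        # an explicit `setup` task or an ad-hoc `ansible -m setup` run does not
        # inherit it and must pass fact_path itself.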
if fact_path:
setup_task.args['fact_path'] = fact_path
setup_task.set_loader(self._play._loader)
# short circuit fact gathering if the entire playbook is conditional
if self._play._included_conditional is not None:
setup_task.when = self._play._included_conditional[:]
setup_block.block = [setup_task]
setup_block = setup_block.filter_tagged_tasks(all_vars)
self._blocks.append(setup_block)
for block in self._play.compile():
new_block = block.filter_tagged_tasks(all_vars)
if new_block.has_tasks():
self._blocks.append(new_block)
self._host_states = {}
start_at_matched = False
batch = inventory.get_hosts(self._play.hosts, order=self._play.order)
self.batch_size = len(batch)
for host in batch:
self.set_state_for_host(host.name, HostState(blocks=self._blocks))
# if we're looking to start at a specific task, iterate through
# the tasks for this host until we find the specified task
if play_context.start_at_task is not None and not start_at_done:
while True:
(s, task) = self.get_next_task_for_host(host, peek=True)
if s.run_state == IteratingStates.COMPLETE:
break
if task.name == play_context.start_at_task or (task.name and fnmatch.fnmatch(task.name, play_context.start_at_task)) or \
task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
start_at_matched = True
break
self.set_state_for_host(host.name, s)
# finally, reset the host's state to IteratingStates.SETUP
if start_at_matched:
self._host_states[host.name].did_start_at_task = True
self._host_states[host.name].run_state = IteratingStates.SETUP
if start_at_matched:
# we have our match, so clear the start_at_task field on the
# play context to flag that we've started at a task (and future
# plays won't try to advance)
play_context.start_at_task = None
self.end_play = False
def get_host_state(self, host):
# Since we're using the PlayIterator to carry forward failed hosts,
# in the event that a previous host was not in the current inventory
# we create a stub state for it now
if host.name not in self._host_states:
self.set_state_for_host(host.name, HostState(blocks=[]))
return self._host_states[host.name].copy()
def cache_block_tasks(self, block):
# now a noop, we've changed the way we do caching and finding of
# original task entries, but just in case any 3rd party strategies
# are using this we're leaving it here for now
return
def get_next_task_for_host(self, host, peek=False):
display.debug("getting the next task for host %s" % host.name)
s = self.get_host_state(host)
task = None
if s.run_state == IteratingStates.COMPLETE:
display.debug("host %s is done iterating, returning" % host.name)
return (s, None)
(s, task) = self._get_next_task_from_state(s, host=host)
if not peek:
self.set_state_for_host(host.name, s)
display.debug("done getting next task for host %s" % host.name)
display.debug(" ^ task is: %s" % task)
display.debug(" ^ state is: %s" % s)
return (s, task)
def _get_next_task_from_state(self, state, host):
task = None
# try and find the next task, given the current state.
while True:
# try to get the current block from the list of blocks, and
# if we run past the end of the list we know we're done with
# this block
try:
block = state._blocks[state.cur_block]
except IndexError:
state.run_state = IteratingStates.COMPLETE
return (state, None)
if state.run_state == IteratingStates.SETUP:
# First, we check to see if we were pending setup. If not, this is
# the first trip through IteratingStates.SETUP, so we set the pending_setup
# flag and try to determine if we do in fact want to gather facts for
# the specified host.
if not state.pending_setup:
state.pending_setup = True
# Gather facts if the default is 'smart' and we have not yet
# done it for this host; or if 'explicit' and the play sets
# gather_facts to True; or if 'implicit' and the play does
# NOT explicitly set gather_facts to False.
gathering = C.DEFAULT_GATHERING
implied = self._play.gather_facts is None or boolean(self._play.gather_facts, strict=False)
if (gathering == 'implicit' and implied) or \
(gathering == 'explicit' and boolean(self._play.gather_facts, strict=False)) or \
(gathering == 'smart' and implied and not (self._variable_manager._fact_cache.get(host.name, {}).get('_ansible_facts_gathered', False))):
# The setup block is always self._blocks[0], as we inject it
# during the play compilation in __init__ above.
setup_block = self._blocks[0]
if setup_block.has_tasks() and len(setup_block.block) > 0:
task = setup_block.block[0]
else:
# This is the second trip through IteratingStates.SETUP, so we clear
# the flag and move onto the next block in the list while setting
# the run state to IteratingStates.TASKS
state.pending_setup = False
state.run_state = IteratingStates.TASKS
if not state.did_start_at_task:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.tasks_child_state = None
state.rescue_child_state = None
state.always_child_state = None
elif state.run_state == IteratingStates.TASKS:
# clear the pending setup flag, since we're past that and it didn't fail
if state.pending_setup:
state.pending_setup = False
# First, we check for a child task state that is not failed, and if we
# have one recurse into it for the next task. If we're done with the child
# state, we clear it and drop back to getting the next task from the list.
if state.tasks_child_state:
(state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host)
if self._check_failed_state(state.tasks_child_state):
# failed child state, so clear it and move into the rescue portion
state.tasks_child_state = None
self._set_failed_state(state)
else:
# get the next task recursively
if task is None or state.tasks_child_state.run_state == IteratingStates.COMPLETE:
# we're done with the child state, so clear it and continue
# back to the top of the loop to get the next task
state.tasks_child_state = None
continue
else:
# First here, we check to see if we've failed anywhere down the chain
# of states we have, and if so we move onto the rescue portion. Otherwise,
# we check to see if we've moved past the end of the list of tasks. If so,
# we move into the always portion of the block, otherwise we get the next
# task from the list.
if self._check_failed_state(state):
state.run_state = IteratingStates.RESCUE
elif state.cur_regular_task >= len(block.block):
state.run_state = IteratingStates.ALWAYS
else:
task = block.block[state.cur_regular_task]
# if the current task is actually a child block, create a child
# state for us to recurse into on the next pass
if isinstance(task, Block):
state.tasks_child_state = HostState(blocks=[task])
state.tasks_child_state.run_state = IteratingStates.TASKS
# since we've created the child state, clear the task
# so we can pick up the child state on the next pass
task = None
state.cur_regular_task += 1
elif state.run_state == IteratingStates.RESCUE:
# The process here is identical to IteratingStates.TASKS, except instead
# we move into the always portion of the block.
if host.name in self._play._removed_hosts:
self._play._removed_hosts.remove(host.name)
if state.rescue_child_state:
(state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host)
if self._check_failed_state(state.rescue_child_state):
state.rescue_child_state = None
self._set_failed_state(state)
else:
if task is None or state.rescue_child_state.run_state == IteratingStates.COMPLETE:
state.rescue_child_state = None
continue
else:
if state.fail_state & FailedStates.RESCUE == FailedStates.RESCUE:
state.run_state = IteratingStates.ALWAYS
elif state.cur_rescue_task >= len(block.rescue):
if len(block.rescue) > 0:
state.fail_state = FailedStates.NONE
state.run_state = IteratingStates.ALWAYS
state.did_rescue = True
else:
task = block.rescue[state.cur_rescue_task]
if isinstance(task, Block):
state.rescue_child_state = HostState(blocks=[task])
state.rescue_child_state.run_state = IteratingStates.TASKS
task = None
state.cur_rescue_task += 1
elif state.run_state == IteratingStates.ALWAYS:
# And again, the process here is identical to IteratingStates.TASKS, except
# instead we either move onto the next block in the list, or we set the
# run state to IteratingStates.COMPLETE in the event of any errors, or when we
# have hit the end of the list of blocks.
if state.always_child_state:
(state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host)
if self._check_failed_state(state.always_child_state):
state.always_child_state = None
self._set_failed_state(state)
else:
if task is None or state.always_child_state.run_state == IteratingStates.COMPLETE:
state.always_child_state = None
continue
else:
if state.cur_always_task >= len(block.always):
if state.fail_state != FailedStates.NONE:
state.run_state = IteratingStates.COMPLETE
else:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.run_state = IteratingStates.TASKS
state.tasks_child_state = None
state.rescue_child_state = None
state.always_child_state = None
state.did_rescue = False
else:
task = block.always[state.cur_always_task]
if isinstance(task, Block):
state.always_child_state = HostState(blocks=[task])
state.always_child_state.run_state = IteratingStates.TASKS
task = None
state.cur_always_task += 1
elif state.run_state == IteratingStates.COMPLETE:
return (state, None)
# if something above set the task, break out of the loop now
if task:
break
return (state, task)
def _set_failed_state(self, state):
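        '''
        Records a failure in the state's fail_state bitmask and advances the
        run state into the rescue/always portion when the current block has one.
        '''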
if state.run_state == IteratingStates.SETUP:
state.fail_state |= FailedStates.SETUP
state.run_state = IteratingStates.COMPLETE
elif state.run_state == IteratingStates.TASKS:
if state.tasks_child_state is not None:
state.tasks_child_state = self._set_failed_state(state.tasks_child_state)
else:
state.fail_state |= FailedStates.TASKS
if state._blocks[state.cur_block].rescue:
state.run_state = IteratingStates.RESCUE
elif state._blocks[state.cur_block].always:
state.run_state = IteratingStates.ALWAYS
else:
state.run_state = IteratingStates.COMPLETE
elif state.run_state == IteratingStates.RESCUE:
if state.rescue_child_state is not None:
state.rescue_child_state = self._set_failed_state(state.rescue_child_state)
else:
state.fail_state |= FailedStates.RESCUE
if state._blocks[state.cur_block].always:
state.run_state = IteratingStates.ALWAYS
else:
state.run_state = IteratingStates.COMPLETE
elif state.run_state == IteratingStates.ALWAYS:
if state.always_child_state is not None:
state.always_child_state = self._set_failed_state(state.always_child_state)
else:
state.fail_state |= FailedStates.ALWAYS
state.run_state = IteratingStates.COMPLETE
return state
def mark_host_failed(self, host):
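        '''
        Marks the given host as failed and removes it from the play.
        '''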
s = self.get_host_state(host)
display.debug("marking host %s failed, current state: %s" % (host, s))
s = self._set_failed_state(s)
display.debug("^ failed state is now: %s" % s)
self.set_state_for_host(host.name, s)
self._play._removed_hosts.append(host.name)
def get_failed_hosts(self):
return dict((host, True) for (host, state) in self._host_states.items() if self._check_failed_state(state))
def _check_failed_state(self, state):
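        '''
        Returns True if the given state (or any relevant child state) should be
        treated as failed, taking rescue/always handling into account.
        '''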
if state is None:
return False
elif state.run_state == IteratingStates.RESCUE and self._check_failed_state(state.rescue_child_state):
return True
elif state.run_state == IteratingStates.ALWAYS and self._check_failed_state(state.always_child_state):
return True
elif state.fail_state != FailedStates.NONE:
if state.run_state == IteratingStates.RESCUE and state.fail_state & FailedStates.RESCUE == 0:
return False
elif state.run_state == IteratingStates.ALWAYS and state.fail_state & FailedStates.ALWAYS == 0:
return False
else:
return not (state.did_rescue and state.fail_state & FailedStates.ALWAYS == 0)
elif state.run_state == IteratingStates.TASKS and self._check_failed_state(state.tasks_child_state):
cur_block = state._blocks[state.cur_block]
if len(cur_block.rescue) > 0 and state.fail_state & FailedStates.RESCUE == 0:
return False
else:
return True
return False
def is_failed(self, host):
s = self.get_host_state(host)
return self._check_failed_state(s)
def get_active_state(self, state):
'''
Finds the active state, recursively if necessary when there are child states.
'''
if state.run_state == IteratingStates.TASKS and state.tasks_child_state is not None:
return self.get_active_state(state.tasks_child_state)
elif state.run_state == IteratingStates.RESCUE and state.rescue_child_state is not None:
return self.get_active_state(state.rescue_child_state)
elif state.run_state == IteratingStates.ALWAYS and state.always_child_state is not None:
return self.get_active_state(state.always_child_state)
return state
def is_any_block_rescuing(self, state):
'''
Given the current HostState state, determines if the current block, or any child blocks,
are in rescue mode.
'''
if state.run_state == IteratingStates.RESCUE:
return True
if state.tasks_child_state is not None:
return self.is_any_block_rescuing(state.tasks_child_state)
return False
def get_original_task(self, host, task):
# now a noop because we've changed the way we do caching
return (None, None)
def _insert_tasks_into_state(self, state, task_list):
# if we've failed at all, or if the task list is empty, just return the current state
if state.fail_state != FailedStates.NONE and state.run_state not in (IteratingStates.RESCUE, IteratingStates.ALWAYS) or not task_list:
return state
if state.run_state == IteratingStates.TASKS:
if state.tasks_child_state:
state.tasks_child_state = self._insert_tasks_into_state(state.tasks_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.block[:state.cur_regular_task]
after = target_block.block[state.cur_regular_task:]
target_block.block = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == IteratingStates.RESCUE:
if state.rescue_child_state:
state.rescue_child_state = self._insert_tasks_into_state(state.rescue_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.rescue[:state.cur_rescue_task]
after = target_block.rescue[state.cur_rescue_task:]
target_block.rescue = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == IteratingStates.ALWAYS:
if state.always_child_state:
state.always_child_state = self._insert_tasks_into_state(state.always_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.always[:state.cur_always_task]
after = target_block.always[state.cur_always_task:]
target_block.always = before + task_list + after
state._blocks[state.cur_block] = target_block
return state
def add_tasks(self, host, task_list):
self.set_state_for_host(host.name, self._insert_tasks_into_state(self.get_host_state(host), task_list))
def set_state_for_host(self, hostname: str, state: HostState) -> None:
if not isinstance(state, HostState):
raise AnsibleAssertionError('Expected state to be a HostState but was a %s' % type(state))
self._host_states[hostname] = state
def set_run_state_for_host(self, hostname: str, run_state: IteratingStates) -> None:
if not isinstance(run_state, IteratingStates):
raise AnsibleAssertionError('Expected run_state to be a IteratingStates but was %s' % (type(run_state)))
self._host_states[hostname].run_state = run_state
def set_fail_state_for_host(self, hostname: str, fail_state: FailedStates) -> None:
if not isinstance(fail_state, FailedStates):
raise AnsibleAssertionError('Expected fail_state to be a FailedStates but was %s' % (type(fail_state)))
self._host_states[hostname].fail_state = fail_state
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 72,263 |
fact_path in ansible.cfg has no effect on ad-hoc setup or setup invoked as task
|
##### SUMMARY
When setting `fact_path` in `ansible.cfg`, the configured path is used only for `gather_facts`; it has no effect when using `setup` as a task or when running `ansible -m setup` ad-hoc. This behavior is in fact [documented](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/setup_module.html#):
> The default fact_path can be specified in ansible.cfg for when setup is automatically called as part of gather_facts
This is particularly painful when the controller is running, say, FreeBSD, as they package Ansible with a default of `/usr/local/etc/ansible/facts.d` which differs greatly from Ansible's typical default of `/etc/ansible/facts.d`. Note that I do not think this is a FreeBSD packaging issue.
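For what it's worth, the `setup` module itself accepts an explicit `fact_path` argument, so passing the configured path by hand should pick up the custom directory and can serve as a workaround in the meantime:
```console
$ ansible 127.0.0.1 -c local -m setup -a 'fact_path=/tmp/jp/facts.d filter=ansible_local'
```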
##### ISSUE TYPE
- Feature Request
##### COMPONENT NAME
setup
##### ANSIBLE VERSION
```
ansible 2.10.2
config file = /Users/jpm/.ansible.cfg
configured module search path = ['/etc/ansible/library']
ansible python module location = /private/tmp/an/env.v3/lib/python3.8/site-packages/ansible
executable location = /private/tmp/an/env.v3/bin/ansible
python version = 3.8.5 (default, Sep 27 2020, 11:38:54) [Clang 12.0.0 (clang-1200.0.32.2)]
```
##### CONFIGURATION
```
ANSIBLE_NOCOWS(/Users/jpm/.ansible.cfg) = True
ANSIBLE_SSH_ARGS(/Users/jpm/.ansible.cfg) = -o PasswordAuthentication=no -o ControlMaster=auto -o ControlPersist=60s -o ControlPath=/tmp/ansible-ssh-%h-%p-%r -o PreferredAuthentications=publickey,gssapi-with-mic,gssapi-keyex,hostbased
DEFAULT_ACTION_PLUGIN_PATH(/Users/jpm/.ansible.cfg) = ['/etc/ansible/jp.action_plugins']
DEFAULT_FACT_PATH(/Users/jpm/.ansible.cfg) = /tmp/jp/facts.d
DEFAULT_FILTER_PLUGIN_PATH(/Users/jpm/.ansible.cfg) = ['/etc/ansible/jp.filter_plugins']
DEFAULT_HOST_LIST(/Users/jpm/.ansible.cfg) = ['/etc/ansible/hosts']
DEFAULT_LOAD_CALLBACK_PLUGINS(/Users/jpm/.ansible.cfg) = True
DEFAULT_MANAGED_STR(/Users/jpm/.ansible.cfg) = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
DEFAULT_MODULE_PATH(/Users/jpm/.ansible.cfg) = ['/etc/ansible/library']
RETRY_FILES_ENABLED(/Users/jpm/.ansible.cfg) = False
```
##### OS / ENVIRONMENT
Linux, FreeBSD, MacOSX
##### STEPS TO REPRODUCE
1. Create a new directory (`/tmp/jp/facts.d`) and a fact file within it
2. Run playbook
3. Run ad-hoc `setup`
```yaml
- hosts: 127.0.0.1
connection: local
gather_facts: true
tasks:
- debug: msg='{{ ansible_local.hw.name }}'
```
##### EXPECTED RESULTS
I expect the ad-hoc `setup` to produce the same values as the setup invocation via `gather_facts`
##### ACTUAL RESULTS
```console
$ cat /tmp/jp/facts.d/hw.fact
{ "name" : "Jane Jolie" }
$ ansible-playbook test.yml
PLAY [127.0.0.1] ********************************************************************
TASK [Gathering Facts] **************************************************************
ok: [localhost]
TASK [debug] ************************************************************************
ok: [localhost] => {
"msg": "Jane Jolie"
}
```
This does not work as expected:
```console
$ ansible 127.0.0.1 -c local -m setup -a filter=ansible_local
localhost | SUCCESS => {
"ansible_facts": {
"ansible_local": {}
},
"changed": false
}
```
|
https://github.com/ansible/ansible/issues/72263
|
https://github.com/ansible/ansible/pull/76053
|
c53e6d94e987c27f60bed55a2dc8001ffcd10cd2
|
0b6d3312dd4dcc7a53b9027e07723cd5752a65ea
| 2020-10-20T12:04:47Z |
python
| 2022-01-10T22:25:00Z |
lib/ansible/playbook/play.py
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleParserError, AnsibleAssertionError
from ansible.module_utils._text import to_native
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.six import binary_type, string_types, text_type
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.block import Block
from ansible.playbook.collectionsearch import CollectionSearch
from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
from ansible.vars.manager import preprocess_vars
from ansible.utils.display import Display
display = Display()
__all__ = ['Play']
class Play(Base, Taggable, CollectionSearch):
"""
A play is a language feature that represents a list of roles and/or
task/handler blocks to execute on a given set of hosts.
Usage:
Play.load(datastructure) -> Play
Play.something(...)
"""
# =================================================================================
_hosts = FieldAttribute(isa='list', required=True, listof=string_types, always_post_validate=True, priority=-1)
# Facts
_gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True)
_gather_subset = FieldAttribute(isa='list', default=(lambda: C.DEFAULT_GATHER_SUBSET), listof=string_types, always_post_validate=True)
_gather_timeout = FieldAttribute(isa='int', default=C.DEFAULT_GATHER_TIMEOUT, always_post_validate=True)
_fact_path = FieldAttribute(isa='string', default=C.DEFAULT_FACT_PATH)
# Variable Attributes
_vars_files = FieldAttribute(isa='list', default=list, priority=99)
_vars_prompt = FieldAttribute(isa='list', default=list, always_post_validate=False)
# Role Attributes
_roles = FieldAttribute(isa='list', default=list, priority=90)
# Block (Task) Lists Attributes
_handlers = FieldAttribute(isa='list', default=list)
_pre_tasks = FieldAttribute(isa='list', default=list)
_post_tasks = FieldAttribute(isa='list', default=list)
_tasks = FieldAttribute(isa='list', default=list)
# Flag/Setting Attributes
_force_handlers = FieldAttribute(isa='bool', default=context.cliargs_deferred_get('force_handlers'), always_post_validate=True)
_max_fail_percentage = FieldAttribute(isa='percent', always_post_validate=True)
_serial = FieldAttribute(isa='list', default=list, always_post_validate=True)
_strategy = FieldAttribute(isa='string', default=C.DEFAULT_STRATEGY, always_post_validate=True)
_order = FieldAttribute(isa='string', always_post_validate=True)
# =================================================================================
def __init__(self):
super(Play, self).__init__()
self._included_conditional = None
self._included_path = None
self._removed_hosts = []
self.ROLE_CACHE = {}
self.only_tags = set(context.CLIARGS.get('tags', [])) or frozenset(('all',))
self.skip_tags = set(context.CLIARGS.get('skip_tags', []))
self._action_groups = {}
self._group_actions = {}
def __repr__(self):
return self.get_name()
def _validate_hosts(self, attribute, name, value):
# Only validate 'hosts' if a value was passed in to original data set.
if 'hosts' in self._ds:
if not value:
raise AnsibleParserError("Hosts list cannot be empty. Please check your playbook")
if is_sequence(value):
# Make sure each item in the sequence is a valid string
for entry in value:
if entry is None:
raise AnsibleParserError("Hosts list cannot contain values of 'None'. Please check your playbook")
elif not isinstance(entry, (binary_type, text_type)):
raise AnsibleParserError("Hosts list contains an invalid host value: '{host!s}'".format(host=entry))
elif not isinstance(value, (binary_type, text_type)):
raise AnsibleParserError("Hosts list must be a sequence or string. Please check your playbook.")
def get_name(self):
''' return the name of the Play '''
if self.name:
return self.name
if is_sequence(self.hosts):
self.name = ','.join(self.hosts)
else:
self.name = self.hosts or ''
return self.name
@staticmethod
def load(data, variable_manager=None, loader=None, vars=None):
p = Play()
if vars:
p.vars = vars.copy()
return p.load_data(data, variable_manager=variable_manager, loader=loader)
def preprocess_data(self, ds):
'''
Adjusts play datastructure to cleanup old/legacy items
'''
if not isinstance(ds, dict):
raise AnsibleAssertionError('while preprocessing data (%s), ds should be a dict but was a %s' % (ds, type(ds)))
# The use of 'user' in the Play datastructure was deprecated to
# line up with the same change for Tasks, due to the fact that
# 'user' conflicted with the user module.
if 'user' in ds:
# this should never happen, but error out with a helpful message
# to the user if it does...
if 'remote_user' in ds:
raise AnsibleParserError("both 'user' and 'remote_user' are set for this play. "
"The use of 'user' is deprecated, and should be removed", obj=ds)
ds['remote_user'] = ds['user']
del ds['user']
return super(Play, self).preprocess_data(ds)
def _load_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading tasks: %s" % to_native(e), obj=self._ds, orig_exc=e)
def _load_pre_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading pre_tasks", obj=self._ds, orig_exc=e)
def _load_post_tasks(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed tasks/blocks.
Bare tasks outside of a block are given an implicit block.
'''
try:
return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading post_tasks", obj=self._ds, orig_exc=e)
def _load_handlers(self, attr, ds):
'''
Loads a list of blocks from a list which may be mixed handlers/blocks.
Bare handlers outside of a block are given an implicit block.
'''
try:
return self._extend_value(
self.handlers,
load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader),
prepend=True
)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading handlers", obj=self._ds, orig_exc=e)
def _load_roles(self, attr, ds):
'''
Loads and returns a list of RoleInclude objects from the datastructure
list of role definitions and creates the Role from those objects
'''
if ds is None:
ds = []
try:
role_includes = load_list_of_roles(ds, play=self, variable_manager=self._variable_manager,
loader=self._loader, collection_search_list=self.collections)
except AssertionError as e:
raise AnsibleParserError("A malformed role declaration was encountered.", obj=self._ds, orig_exc=e)
roles = []
for ri in role_includes:
roles.append(Role.load(ri, play=self))
self.roles[:0] = roles
return self.roles
def _load_vars_prompt(self, attr, ds):
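        '''
        Validates the vars_prompt entries and returns the cleaned list.
        '''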
new_ds = preprocess_vars(ds)
vars_prompts = []
if new_ds is not None:
for prompt_data in new_ds:
if 'name' not in prompt_data:
raise AnsibleParserError("Invalid vars_prompt data structure, missing 'name' key", obj=ds)
for key in prompt_data:
if key not in ('name', 'prompt', 'default', 'private', 'confirm', 'encrypt', 'salt_size', 'salt', 'unsafe'):
raise AnsibleParserError("Invalid vars_prompt data structure, found unsupported key '%s'" % key, obj=ds)
vars_prompts.append(prompt_data)
return vars_prompts
def _compile_roles(self):
'''
Handles the role compilation step, returning a flat list of tasks
with the lowest level dependencies first. For example, if a role R
has a dependency D1, which also has a dependency D2, the tasks from
        D2 are merged first, followed by D1, and lastly by the tasks from
        the parent role R. This is done for all roles in the Play.
'''
block_list = []
if len(self.roles) > 0:
for r in self.roles:
# Don't insert tasks from ``import/include_role``, preventing
# duplicate execution at the wrong time
if r.from_include:
continue
block_list.extend(r.compile(play=self))
return block_list
def compile_roles_handlers(self):
'''
Handles the role handler compilation step, returning a flat list of Handlers
This is done for all roles in the Play.
'''
block_list = []
if len(self.roles) > 0:
for r in self.roles:
if r.from_include:
continue
block_list.extend(r.get_handler_blocks(play=self))
return block_list
def compile(self):
'''
Compiles and returns the task list for this play, compiled from the
roles (which are themselves compiled recursively) and/or the list of
tasks specified in the play.
'''
# create a block containing a single flush handlers meta
# task, so we can be sure to run handlers at certain points
# of the playbook execution
flush_block = Block.load(
data={'meta': 'flush_handlers'},
play=self,
variable_manager=self._variable_manager,
loader=self._loader
)
for task in flush_block.block:
task.implicit = True
block_list = []
block_list.extend(self.pre_tasks)
block_list.append(flush_block)
block_list.extend(self._compile_roles())
block_list.extend(self.tasks)
block_list.append(flush_block)
block_list.extend(self.post_tasks)
block_list.append(flush_block)
return block_list
def get_vars(self):
return self.vars.copy()
def get_vars_files(self):
if self.vars_files is None:
return []
elif not isinstance(self.vars_files, list):
return [self.vars_files]
return self.vars_files
def get_handlers(self):
return self.handlers[:]
def get_roles(self):
return self.roles[:]
def get_tasks(self):
tasklist = []
for task in self.pre_tasks + self.tasks + self.post_tasks:
if isinstance(task, Block):
tasklist.append(task.block + task.rescue + task.always)
else:
tasklist.append(task)
return tasklist
def serialize(self):
data = super(Play, self).serialize()
roles = []
for role in self.get_roles():
roles.append(role.serialize())
data['roles'] = roles
data['included_path'] = self._included_path
data['action_groups'] = self._action_groups
data['group_actions'] = self._group_actions
return data
def deserialize(self, data):
super(Play, self).deserialize(data)
self._included_path = data.get('included_path', None)
self._action_groups = data.get('action_groups', {})
self._group_actions = data.get('group_actions', {})
if 'roles' in data:
role_data = data.get('roles', [])
roles = []
for role in role_data:
r = Role()
r.deserialize(role)
roles.append(r)
setattr(self, 'roles', roles)
del data['roles']
def copy(self):
new_me = super(Play, self).copy()
new_me.ROLE_CACHE = self.ROLE_CACHE.copy()
new_me._included_conditional = self._included_conditional
new_me._included_path = self._included_path
new_me._action_groups = self._action_groups
new_me._group_actions = self._group_actions
return new_me
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 69,034 |
Apt module to support minimum version specification ">="
|
It's a reopening of the closed issue #21449.
##### SUMMARY
I would like to be able to specify the minimum version of a package to be installed instead of a (partial) exact match.
For example, I would like to have elasticsearch present on my system in a version equal to or greater than 7.6.1, because feature X has been present since this version:
- if elasticsearch is not installed: install the latest elasticsearch package
- if elasticsearch is installed in version 7.6.0: install the latest elasticsearch package (7.6.2)
- if elasticsearch is installed in version 7.6.1: do nothing, return ok
- if elasticsearch is installed in version 7.6.2 or more: do nothing, return ok
```yml
- name: Install Elasticsearch.
package:
name: elasticsearch>=7.6.1
state: present
```
##### ACTUAL RESULTS
```
fatal: [poc-elasticsearch]: FAILED! => {"changed": false, "msg": "No package matching 'elasticsearch>' is available"}
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
[apt module](https://docs.ansible.com/ansible/latest/modules/apt_module.html)
##### ADDITIONAL INFORMATION
Answer to https://github.com/ansible/ansible/issues/21449#issuecomment-280158405
> Does apt itself support these? If not, I'm not certain we want to add it as that would require us to parse and compare dpkg version strings. If it does, then I could see us taking a PR to use the apt python bindings to do this... Someone would have to reconcile this with the current usage of the "=" to mean fnmatching, though.
Apt python bindings support the comparison of versions with `apt_pkg.version_compare`:
```py
import apt
import apt_pkg
cache = apt.Cache()
pkg = cache['elasticsearch']
installed = pkg.installed.version
candidate = pkg.versions[0].version
vc = apt_pkg.version_compare(installed,candidate)
if vc > 0:
print(f'{installed} version > {candidate} version')
elif vc == 0:
print(f'{installed} version = {candidate} version')
elif vc < 0 and pkg.is_upgradable:
print(f'{installed} version < {candidate} version')
```
Documentation: https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html?highlight=version_compare#apt_pkg.version_compare
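As a rough sketch of the requested behaviour (the helper below is hypothetical, not an existing module function), a `>=` spec could be split and the installed version checked against the minimum like this:
```py
import apt
import apt_pkg

def split_min_version_spec(pkgspec):
    # split "name>=version" into parts; returns (name, None) when the
    # ">=" operator is absent
    name, sep, version = pkgspec.partition('>=')
    return (name, version) if sep else (name, None)

name, min_version = split_min_version_spec('elasticsearch>=7.6.1')
pkg = apt.Cache()[name]
if min_version and pkg.installed and apt_pkg.version_compare(pkg.installed.version, min_version) >= 0:
    print('installed version already satisfies the minimum, nothing to do')
```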
|
https://github.com/ansible/ansible/issues/69034
|
https://github.com/ansible/ansible/pull/75002
|
a5bea8b2f51ba9d9c001f713ee7e658cf1a96385
|
4a62c4e3e44b01a904aa86e9b87206a24bd41fbc
| 2020-04-20T07:29:38Z |
python
| 2022-01-11T14:41:12Z |
changelogs/fragments/75002-apt_min_version.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 69,034 |
Apt module to support minimum version specification ">="
|
It's a reopening of the closed issue #21449.
##### SUMMARY
I would like to be able to specify the minimum version of a package to be installed instead of a (partial) exact match.
For example, I would like to have elasticsearch present on my system in a version equal to or greater than 7.6.1, because feature X has been present since this version:
- if elasticsearch is not installed: install the latest elasticsearch package
- if elasticsearch is installed in version 7.6.0: install the latest elasticsearch package (7.6.2)
- if elasticsearch is installed in version 7.6.1: do nothing, return ok
- if elasticsearch is installed in version 7.6.2 or more: do nothing, return ok
```yml
- name: Install Elasticsearch.
package:
name: elasticsearch>=7.6.1
state: present
```
##### ACTUAL RESULTS
```
fatal: [poc-elasticsearch]: FAILED! => {"changed": false, "msg": "No package matching 'elasticsearch>' is available"}
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
[apt module](https://docs.ansible.com/ansible/latest/modules/apt_module.html)
##### ADDITIONAL INFORMATION
Answer to https://github.com/ansible/ansible/issues/21449#issuecomment-280158405
> Does apt itself support these? If not, I'm not certain we want to add it as that would require us to parse and compare dpkg version strings. If it does, then I could see us taking a PR to use the apt python bindings to do this... Someone would have to reconcile this with the current usage of the "=" to mean fnmatching, though.
Apt python bindings support the comparison of versions with `apt_pkg.version_compare`:
```py
import apt
import apt_pkg
cache = apt.Cache()
pkg = cache['elasticsearch']
installed = pkg.installed.version
candidate = pkg.versions[0].version
vc = apt_pkg.version_compare(installed,candidate)
if vc > 0:
print(f'{installed} version > {candidate} version')
elif vc == 0:
print(f'{installed} version = {candidate} version')
elif vc < 0 and pkg.is_upgradable:
print(f'{installed} version < {candidate} version')
```
Documentation: https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html?highlight=version_compare#apt_pkg.version_compare
|
https://github.com/ansible/ansible/issues/69034
|
https://github.com/ansible/ansible/pull/75002
|
a5bea8b2f51ba9d9c001f713ee7e658cf1a96385
|
4a62c4e3e44b01a904aa86e9b87206a24bd41fbc
| 2020-04-20T07:29:38Z |
python
| 2022-01-11T14:41:12Z |
lib/ansible/modules/apt.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Flowroute LLC
# Written by Matthew Williams <[email protected]>
# Based on yum module written by Seth Vidal <skvidal at fedoraproject.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: apt
short_description: Manages apt-packages
description:
- Manages I(apt) packages (such as for Debian/Ubuntu).
version_added: "0.0.2"
options:
name:
description:
- A list of package names, like C(foo), or package specifier with version, like C(foo=1.0).
Name wildcards (fnmatch) like C(apt*) and version wildcards like C(foo=1.0*) are also supported.
aliases: [ package, pkg ]
type: list
elements: str
state:
description:
- Indicates the desired package state. C(latest) ensures that the latest version is installed. C(build-dep) ensures the package build dependencies
        are installed. C(fixed) attempts to correct a system with broken dependencies in place.
type: str
default: present
choices: [ absent, build-dep, latest, present, fixed ]
update_cache:
description:
- Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step.
- Default is not to update the cache.
aliases: [ update-cache ]
type: bool
update_cache_retries:
description:
      - Number of retries if the cache update fails. Also see I(update_cache_retry_max_delay).
type: int
default: 5
version_added: '2.10'
update_cache_retry_max_delay:
description:
- Use an exponential backoff delay for each retry (see I(update_cache_retries)) up to this max delay in seconds.
type: int
default: 12
version_added: '2.10'
cache_valid_time:
description:
- Update the apt cache if it is older than the I(cache_valid_time). This option is set in seconds.
- As of Ansible 2.4, if explicitly set, this sets I(update_cache=yes).
type: int
default: 0
purge:
description:
- Will force purging of configuration files if the module state is set to I(absent).
type: bool
default: 'no'
default_release:
description:
- Corresponds to the C(-t) option for I(apt) and sets pin priorities
aliases: [ default-release ]
type: str
install_recommends:
description:
- Corresponds to the C(--no-install-recommends) option for I(apt). C(yes) installs recommended packages. C(no) does not install
recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed.
aliases: [ install-recommends ]
type: bool
force:
description:
- 'Corresponds to the C(--force-yes) to I(apt-get) and implies C(allow_unauthenticated: yes) and C(allow_downgrade: yes)'
- "This option will disable checking both the packages' signatures and the certificates of the
web servers they are downloaded from."
- 'This option *is not* the equivalent of passing the C(-f) flag to I(apt-get) on the command line'
- '**This is a destructive operation with the potential to destroy your system, and it should almost never be used.**
Please also see C(man apt-get) for more information.'
type: bool
default: 'no'
allow_unauthenticated:
description:
- Ignore if packages cannot be authenticated. This is useful for bootstrapping environments that manage their own apt-key setup.
- 'C(allow_unauthenticated) is only supported with state: I(install)/I(present)'
aliases: [ allow-unauthenticated ]
type: bool
default: 'no'
version_added: "2.1"
allow_downgrade:
description:
- Corresponds to the C(--allow-downgrades) option for I(apt).
- This option enables the named package and version to replace an already installed higher version of that package.
- Note that setting I(allow_downgrade=true) can make this module behave in a non-idempotent way.
- (The task could end up with a set of packages that does not match the complete list of specified packages to install).
aliases: [ allow-downgrade, allow_downgrades, allow-downgrades ]
type: bool
default: 'no'
version_added: "2.12"
allow_change_held_packages:
description:
- Allows changing the version of a package which is on the apt hold list
type: bool
default: 'no'
version_added: '2.13'
upgrade:
description:
- If yes or safe, performs an aptitude safe-upgrade.
- If full, performs an aptitude full-upgrade.
- If dist, performs an apt-get dist-upgrade.
- 'Note: This does not upgrade a specific package, use state=latest for that.'
- 'Note: Since 2.4, apt-get is used as a fall-back if aptitude is not present.'
version_added: "1.1"
choices: [ dist, full, 'no', safe, 'yes' ]
default: 'no'
type: str
dpkg_options:
description:
- Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"'
      - Options should be supplied as a comma-separated list
default: force-confdef,force-confold
type: str
deb:
description:
- Path to a .deb package on the remote machine.
      - If C(://) is in the path, Ansible will attempt to download the deb before installing. (Version added 2.1)
- Requires the C(xz-utils) package to extract the control file of the deb package to install.
type: path
required: false
version_added: "1.6"
autoremove:
description:
- If C(yes), remove unused dependency packages for all module states except I(build-dep). It can also be used as the only option.
      - Prior to version 2.4, autoclean was also an alias for autoremove; now it is its own separate command. See documentation for further information.
type: bool
default: 'no'
version_added: "2.1"
autoclean:
description:
- If C(yes), cleans the local repository of retrieved package files that can no longer be downloaded.
type: bool
default: 'no'
version_added: "2.4"
policy_rc_d:
description:
- Force the exit code of /usr/sbin/policy-rc.d.
- For example, if I(policy_rc_d=101) the installed package will not trigger a service start.
- If /usr/sbin/policy-rc.d already exists, it is backed up and restored after the package installation.
- If C(null), the /usr/sbin/policy-rc.d isn't created/changed.
type: int
default: null
version_added: "2.8"
only_upgrade:
description:
- Only upgrade a package if it is already installed.
type: bool
default: 'no'
version_added: "2.1"
fail_on_autoremove:
description:
- 'Corresponds to the C(--no-remove) option for C(apt).'
- 'If C(yes), it is ensured that no packages will be removed or the task will fail.'
      - 'C(fail_on_autoremove) is supported with all states except C(absent)'
type: bool
default: 'no'
version_added: "2.11"
force_apt_get:
description:
- Force usage of apt-get instead of aptitude
type: bool
default: 'no'
version_added: "2.4"
lock_timeout:
description:
- How many seconds will this action wait to acquire a lock on the apt db.
- Sometimes there is a transitory lock and this will retry at least until timeout is hit.
type: int
default: 60
version_added: "2.12"
requirements:
- python-apt (python 2)
- python3-apt (python 3)
- aptitude (before 2.4)
author: "Matthew Williams (@mgwilliams)"
extends_documentation_fragment: action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: full
platform:
platforms: debian
notes:
  - Three of the upgrade modes (C(full), C(safe) and its alias C(yes)) required C(aptitude) up to 2.3; since 2.4, C(apt-get) is used as a fall-back.
- In most cases, packages installed with apt will start newly installed services by default. Most distributions have mechanisms to avoid this.
    For example, when installing Postgresql-9.5 in Debian 9, creating an executable shell script (/usr/sbin/policy-rc.d) that throws
    a return code of 101 will stop Postgresql 9.5 from starting up after install. Remove the file or remove its execute permission afterwards.
- The apt-get commandline supports implicit regex matches here but we do not because it can let typos through easier
(If you typo C(foo) as C(fo) apt-get would install packages that have "fo" in their name with a warning and a prompt for the user.
    Since we don't have warnings and prompts before installing, we disallow this. Use an explicit fnmatch pattern if you want wildcarding.)
- When used with a C(loop:) each package will be processed individually, it is much more efficient to pass the list directly to the I(name) option.
'''
EXAMPLES = '''
- name: Install apache httpd (state=present is optional)
apt:
name: apache2
state: present
- name: Update repositories cache and install "foo" package
apt:
name: foo
update_cache: yes
- name: Remove "foo" package
apt:
name: foo
state: absent
- name: Install the package "foo"
apt:
name: foo
- name: Install a list of packages
apt:
pkg:
- foo
- foo-tools
- name: Install the version '1.00' of package "foo"
apt:
name: foo=1.00
- name: Update the repository cache and update package "nginx" to latest version using default release squeeze-backport
apt:
name: nginx
state: latest
default_release: squeeze-backports
update_cache: yes
- name: Install the version '1.18.0' of package "nginx" and allow potential downgrades
apt:
name: nginx=1.18.0
state: present
allow_downgrade: yes
- name: Install zfsutils-linux with ensuring conflicted packages (e.g. zfs-fuse) will not be removed.
apt:
name: zfsutils-linux
state: latest
fail_on_autoremove: yes
- name: Install latest version of "openjdk-6-jdk" ignoring "install-recommends"
apt:
name: openjdk-6-jdk
state: latest
install_recommends: no
- name: Update all packages to their latest version
apt:
name: "*"
state: latest
- name: Upgrade the OS (apt-get dist-upgrade)
apt:
upgrade: dist
- name: Run the equivalent of "apt-get update" as a separate step
apt:
update_cache: yes
- name: Only run "update_cache=yes" if the last one is more than 3600 seconds ago
apt:
update_cache: yes
cache_valid_time: 3600
- name: Pass options to dpkg on run
apt:
upgrade: dist
update_cache: yes
dpkg_options: 'force-confold,force-confdef'
- name: Install a .deb package
apt:
deb: /tmp/mypackage.deb
- name: Install the build dependencies for package "foo"
apt:
pkg: foo
state: build-dep
- name: Install a .deb package from the internet
apt:
deb: https://example.com/python-ppq_0.1-1_all.deb
- name: Remove useless packages from the cache
apt:
autoclean: yes
- name: Remove dependencies that are no longer required
apt:
autoremove: yes
'''
RETURN = '''
cache_updated:
description: if the cache was updated or not
returned: success, in some cases
type: bool
sample: True
cache_update_time:
description: time of the last cache update (0 if unknown)
returned: success, in some cases
type: int
sample: 1425828348000
stdout:
description: output from apt
returned: success, when needed
type: str
sample: "Reading package lists...\nBuilding dependency tree...\nReading state information...\nThe following extra packages will be installed:\n apache2-bin ..."
stderr:
description: error output from apt
returned: success, when needed
type: str
sample: "AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 127.0.1.1. Set the 'ServerName' directive globally to ..."
''' # NOQA
# added to stave off future warnings about apt api
import warnings
warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning)
import datetime
import fnmatch
import itertools
import os
import random
import re
import shutil
import sys
import tempfile
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.common.respawn import has_respawned, probe_interpreters_for_module, respawn_module
from ansible.module_utils._text import to_native
from ansible.module_utils.six import PY3
from ansible.module_utils.urls import fetch_file
DPKG_OPTIONS = 'force-confdef,force-confold'
APT_GET_ZERO = "\n0 upgraded, 0 newly installed"
APTITUDE_ZERO = "\n0 packages upgraded, 0 newly installed"
APT_LISTS_PATH = "/var/lib/apt/lists"
APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp"
APT_MARK_INVALID_OP = 'Invalid operation'
APT_MARK_INVALID_OP_DEB6 = 'Usage: apt-mark [options] {markauto|unmarkauto} packages'
CLEAN_OP_CHANGED_STR = dict(
autoremove='The following packages will be REMOVED',
# "Del python3-q 2.4-1 [24 kB]"
autoclean='Del ',
)
apt = apt_pkg = None # keep pylint happy by declaring unconditionally
HAS_PYTHON_APT = False
try:
import apt
import apt.debfile
import apt_pkg
HAS_PYTHON_APT = True
except ImportError:
pass
class PolicyRcD(object):
"""
This class is a context manager for the /usr/sbin/policy-rc.d file.
    It allows the user to prevent dpkg from starting the corresponding service when installing
    a package.
https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
"""
def __init__(self, module):
# we need the module for later use (eg. fail_json)
self.m = module
# if policy_rc_d is null then we don't need to modify policy-rc.d
if self.m.params['policy_rc_d'] is None:
return
# if the /usr/sbin/policy-rc.d already exists
# we will back it up during package installation
# then restore it
if os.path.exists('/usr/sbin/policy-rc.d'):
self.backup_dir = tempfile.mkdtemp(prefix="ansible")
else:
self.backup_dir = None
def __enter__(self):
"""
This method will be called when we enter the context, before we call `apt-get …`
"""
# if policy_rc_d is null then we don't need to modify policy-rc.d
if self.m.params['policy_rc_d'] is None:
return
# if the /usr/sbin/policy-rc.d already exists we back it up
if self.backup_dir:
try:
shutil.move('/usr/sbin/policy-rc.d', self.backup_dir)
except Exception:
self.m.fail_json(msg="Fail to move /usr/sbin/policy-rc.d to %s" % self.backup_dir)
# we write /usr/sbin/policy-rc.d so it always exits with code policy_rc_d
try:
with open('/usr/sbin/policy-rc.d', 'w') as policy_rc_d:
policy_rc_d.write('#!/bin/sh\nexit %d\n' % self.m.params['policy_rc_d'])
os.chmod('/usr/sbin/policy-rc.d', 0o0755)
except Exception:
self.m.fail_json(msg="Failed to create or chmod /usr/sbin/policy-rc.d")
def __exit__(self, type, value, traceback):
"""
        This method will be called when we leave the context, after `apt-get …` has been run
"""
# if policy_rc_d is null then we don't need to modify policy-rc.d
if self.m.params['policy_rc_d'] is None:
return
if self.backup_dir:
# if /usr/sbin/policy-rc.d already exists before the call to __enter__
# we restore it (from the backup done in __enter__)
try:
shutil.move(os.path.join(self.backup_dir, 'policy-rc.d'),
'/usr/sbin/policy-rc.d')
os.rmdir(self.backup_dir)
except Exception:
self.m.fail_json(msg="Fail to move back %s to /usr/sbin/policy-rc.d"
% os.path.join(self.backup_dir, 'policy-rc.d'))
else:
# if there wasn't a /usr/sbin/policy-rc.d file before the call to __enter__
# we just remove the file
try:
os.remove('/usr/sbin/policy-rc.d')
except Exception:
self.m.fail_json(msg="Fail to remove /usr/sbin/policy-rc.d (after package manipulation)")
def package_split(pkgspec):
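    """Split a pkgspec like 'foo=1.0' into a (name, version) tuple.

    Only the '=' separator is recognised here; relational specifiers
    such as 'foo>=1.0' are not parsed.
    """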
parts = pkgspec.split('=', 1)
version = None
if len(parts) > 1:
version = parts[1]
return parts[0], version
def package_versions(pkgname, pkg, pkg_cache):
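    """Return the set of available version strings for the given package."""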
try:
versions = set(p.version for p in pkg.versions)
except AttributeError:
# assume older version of python-apt is installed
# apt.package.Package#versions require python-apt >= 0.7.9.
pkg_cache_list = (p for p in pkg_cache.Packages if p.Name == pkgname)
pkg_versions = (p.VersionList for p in pkg_cache_list)
versions = set(p.VerStr for p in itertools.chain(*pkg_versions))
return versions
def package_version_compare(version, other_version):
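    """Compare two version strings, supporting both newer and older python-apt APIs."""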
try:
return apt_pkg.version_compare(version, other_version)
except AttributeError:
return apt_pkg.VersionCompare(version, other_version)
def package_status(m, pkgname, version, cache, state):
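    """Return a (is_installed, version_is_installed, is_upgradable, has_files) tuple for the named package."""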
try:
# get the package from the cache, as well as the
# low-level apt_pkg.Package object which contains
# state fields not directly accessible from the
# higher-level apt.package.Package object.
pkg = cache[pkgname]
ll_pkg = cache._cache[pkgname] # the low-level package object
except KeyError:
if state == 'install':
try:
provided_packages = cache.get_providing_packages(pkgname)
if provided_packages:
is_installed = False
upgradable = False
version_ok = False
# when virtual package providing only one package, look up status of target package
if cache.is_virtual_package(pkgname) and len(provided_packages) == 1:
package = provided_packages[0]
installed, version_ok, upgradable, has_files = package_status(m, package.name, version, cache, state='install')
if installed:
is_installed = True
return is_installed, version_ok, upgradable, False
m.fail_json(msg="No package matching '%s' is available" % pkgname)
except AttributeError:
# python-apt version too old to detect virtual packages
# mark as upgradable and let apt-get install deal with it
return False, False, True, False
else:
return False, False, False, False
try:
has_files = len(pkg.installed_files) > 0
except UnicodeDecodeError:
has_files = True
except AttributeError:
has_files = False # older python-apt cannot be used to determine non-purged
try:
package_is_installed = ll_pkg.current_state == apt_pkg.CURSTATE_INSTALLED
except AttributeError: # python-apt 0.7.X has very weak low-level object
try:
# might not be necessary as python-apt post-0.7.X should have current_state property
package_is_installed = pkg.is_installed
except AttributeError:
# assume older version of python-apt is installed
package_is_installed = pkg.isInstalled
version_is_installed = package_is_installed
if version:
versions = package_versions(pkgname, pkg, cache._cache)
avail_upgrades = fnmatch.filter(versions, version)
if package_is_installed:
try:
installed_version = pkg.installed.version
except AttributeError:
installed_version = pkg.installedVersion
# check if the version is matched as well
version_is_installed = fnmatch.fnmatch(installed_version, version)
# Only claim the package is upgradable if a candidate matches the version
package_is_upgradable = False
for candidate in avail_upgrades:
if package_version_compare(candidate, installed_version) > 0:
package_is_upgradable = True
break
else:
package_is_upgradable = bool(avail_upgrades)
else:
try:
package_is_upgradable = pkg.is_upgradable
except AttributeError:
# assume older version of python-apt is installed
package_is_upgradable = pkg.isUpgradable
return package_is_installed, version_is_installed, package_is_upgradable, has_files
def expand_dpkg_options(dpkg_options_compressed):
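    """Expand a comma-separated option list into repeated -o "Dpkg::Options::=--..." flags."""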
options_list = dpkg_options_compressed.split(',')
dpkg_options = ""
for dpkg_option in options_list:
dpkg_options = '%s -o "Dpkg::Options::=--%s"' \
% (dpkg_options, dpkg_option)
return dpkg_options.strip()
def expand_pkgspec_from_fnmatches(m, pkgspec, cache):
# Note: apt-get does implicit regex matching when an exact package name
# match is not found. Something like this:
# matches = [pkg.name for pkg in cache if re.match(pkgspec, pkg.name)]
# (Should also deal with the ':' for multiarch like the fnmatch code below)
#
# We have decided not to do similar implicit regex matching but might take
# a PR to add some sort of explicit regex matching:
# https://github.com/ansible/ansible-modules-core/issues/1258
new_pkgspec = []
if pkgspec:
for pkgspec_pattern in pkgspec:
pkgname_pattern, version = package_split(pkgspec_pattern)
# note that none of these chars is allowed in a (debian) pkgname
if frozenset('*?[]!').intersection(pkgname_pattern):
# handle multiarch pkgnames, the idea is that "apt*" should
# only select native packages. But "apt*:i386" should still work
if ":" not in pkgname_pattern:
# Filter the multiarch packages from the cache only once
try:
pkg_name_cache = _non_multiarch
except NameError:
pkg_name_cache = _non_multiarch = [pkg.name for pkg in cache if ':' not in pkg.name] # noqa: F841
else:
# Create a cache of pkg_names including multiarch only once
try:
pkg_name_cache = _all_pkg_names
except NameError:
pkg_name_cache = _all_pkg_names = [pkg.name for pkg in cache] # noqa: F841
matches = fnmatch.filter(pkg_name_cache, pkgname_pattern)
if not matches:
m.fail_json(msg="No package(s) matching '%s' available" % str(pkgname_pattern))
else:
new_pkgspec.extend(matches)
else:
# No wildcards in name
new_pkgspec.append(pkgspec_pattern)
return new_pkgspec
def parse_diff(output):
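    """Extract the relevant portion of apt/aptitude output for use as a diff."""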
diff = to_native(output).splitlines()
try:
# check for start marker from aptitude
diff_start = diff.index('Resolving dependencies...')
except ValueError:
try:
# check for start marker from apt-get
diff_start = diff.index('Reading state information...')
except ValueError:
# show everything
diff_start = -1
try:
# check for end marker line from both apt-get and aptitude
diff_end = next(i for i, item in enumerate(diff) if re.match('[0-9]+ (packages )?upgraded', item))
except StopIteration:
diff_end = len(diff)
diff_start += 1
diff_end += 1
return {'prepared': '\n'.join(diff[diff_start:diff_end])}
def mark_installed_manually(m, packages):
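    """Mark the given packages as manually installed so autoremove keeps them."""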
if not packages:
return
apt_mark_cmd_path = m.get_bin_path("apt-mark")
# https://github.com/ansible/ansible/issues/40531
if apt_mark_cmd_path is None:
m.warn("Could not find apt-mark binary, not marking package(s) as manually installed.")
return
cmd = "%s manual %s" % (apt_mark_cmd_path, ' '.join(packages))
rc, out, err = m.run_command(cmd)
if APT_MARK_INVALID_OP in err or APT_MARK_INVALID_OP_DEB6 in err:
cmd = "%s unmarkauto %s" % (apt_mark_cmd_path, ' '.join(packages))
rc, out, err = m.run_command(cmd)
if rc != 0:
m.fail_json(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc)
def install(m, pkgspec, cache, upgrade=False, default_release=None,
install_recommends=None, force=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS),
build_dep=False, fixed=False, autoremove=False, fail_on_autoremove=False, only_upgrade=False,
allow_unauthenticated=False, allow_downgrade=False, allow_change_held_packages=False):
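    """Install the requested packages (or their build dependencies) and return (success, retvals)."""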
pkg_list = []
packages = ""
pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
package_names = []
for package in pkgspec:
if build_dep:
# Let apt decide what to install
pkg_list.append("'%s'" % package)
continue
name, version = package_split(package)
package_names.append(name)
installed, installed_version, upgradable, has_files = package_status(m, name, version, cache, state='install')
if (not installed and not only_upgrade) or (installed and not installed_version) or (upgrade and upgradable):
pkg_list.append("'%s'" % package)
if installed_version and upgradable and version:
# This happens when the package is installed, a newer version is
# available, and the version is a wildcard that matches both
#
# We do not apply the upgrade flag because we cannot specify both
# a version and state=latest. (This behaviour mirrors how apt
# treats a version with wildcard in the package)
pkg_list.append("'%s'" % package)
packages = ' '.join(pkg_list)
if packages:
if force:
force_yes = '--force-yes'
else:
force_yes = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
if autoremove:
autoremove = '--auto-remove'
else:
autoremove = ''
if fail_on_autoremove:
fail_on_autoremove = '--no-remove'
else:
fail_on_autoremove = ''
if only_upgrade:
only_upgrade = '--only-upgrade'
else:
only_upgrade = ''
if fixed:
fixed = '--fix-broken'
else:
fixed = ''
if build_dep:
cmd = "%s -y %s %s %s %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, fail_on_autoremove, check_arg, packages)
else:
cmd = "%s -y %s %s %s %s %s %s %s install %s" % \
(APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, autoremove, fail_on_autoremove, check_arg, packages)
if default_release:
cmd += " -t '%s'" % (default_release,)
if install_recommends is False:
cmd += " -o APT::Install-Recommends=no"
elif install_recommends is True:
cmd += " -o APT::Install-Recommends=yes"
# install_recommends is None uses the OS default
if allow_unauthenticated:
cmd += " --allow-unauthenticated"
if allow_downgrade:
cmd += " --allow-downgrades"
if allow_change_held_packages:
cmd += " --allow-change-held-packages"
with PolicyRcD(m):
rc, out, err = m.run_command(cmd)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
status = True
changed = True
if build_dep:
changed = APT_GET_ZERO not in out
data = dict(changed=changed, stdout=out, stderr=err, diff=diff)
if rc:
status = False
data = dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc)
else:
status = True
data = dict(changed=False)
if not build_dep:
mark_installed_manually(m, package_names)
return (status, data)
def get_field_of_deb(m, deb_file, field="Version"):
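    """Return a single control field (Version by default) of a .deb file via dpkg."""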
cmd_dpkg = m.get_bin_path("dpkg", True)
cmd = cmd_dpkg + " --field %s %s" % (deb_file, field)
rc, stdout, stderr = m.run_command(cmd)
if rc != 0:
m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
return to_native(stdout).strip('\n')
def install_deb(
m, debs, cache, force, fail_on_autoremove, install_recommends,
allow_unauthenticated,
allow_downgrade,
allow_change_held_packages,
dpkg_options,
):
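    """Install one or more local .deb files, resolving any missing dependencies through apt."""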
changed = False
deps_to_install = []
pkgs_to_install = []
for deb_file in debs.split(','):
try:
pkg = apt.debfile.DebPackage(deb_file, cache=apt.Cache())
pkg_name = get_field_of_deb(m, deb_file, "Package")
pkg_version = get_field_of_deb(m, deb_file, "Version")
if hasattr(apt_pkg, 'get_architectures') and len(apt_pkg.get_architectures()) > 1:
pkg_arch = get_field_of_deb(m, deb_file, "Architecture")
pkg_key = "%s:%s" % (pkg_name, pkg_arch)
else:
pkg_key = pkg_name
try:
installed_pkg = apt.Cache()[pkg_key]
installed_version = installed_pkg.installed.version
if package_version_compare(pkg_version, installed_version) == 0:
# Does not need to down-/upgrade, move on to next package
continue
except Exception:
# Must not be installed, continue with installation
pass
# Check if package is installable
if not pkg.check():
if force or ("later version" in pkg._failure_string and allow_downgrade):
pass
else:
m.fail_json(msg=pkg._failure_string)
# add any missing deps to the list of deps we need
# to install so they're all done in one shot
deps_to_install.extend(pkg.missing_deps)
except Exception as e:
m.fail_json(msg="Unable to install package: %s" % to_native(e))
# and add this deb to the list of packages to install
pkgs_to_install.append(deb_file)
# install the deps through apt
retvals = {}
if deps_to_install:
(success, retvals) = install(m=m, pkgspec=deps_to_install, cache=cache,
install_recommends=install_recommends,
fail_on_autoremove=fail_on_autoremove,
allow_unauthenticated=allow_unauthenticated,
allow_downgrade=allow_downgrade,
allow_change_held_packages=allow_change_held_packages,
dpkg_options=expand_dpkg_options(dpkg_options))
if not success:
m.fail_json(**retvals)
changed = retvals.get('changed', False)
if pkgs_to_install:
options = ' '.join(["--%s" % x for x in dpkg_options.split(",")])
if m.check_mode:
options += " --simulate"
if force:
options += " --force-all"
cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install))
with PolicyRcD(m):
rc, out, err = m.run_command(cmd)
if "stdout" in retvals:
stdout = retvals["stdout"] + out
else:
stdout = out
if "diff" in retvals:
diff = retvals["diff"]
if 'prepared' in diff:
diff['prepared'] += '\n\n' + out
else:
diff = parse_diff(out)
if "stderr" in retvals:
stderr = retvals["stderr"] + err
else:
stderr = err
if rc == 0:
m.exit_json(changed=True, stdout=stdout, stderr=stderr, diff=diff)
else:
m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
else:
m.exit_json(changed=changed, stdout=retvals.get('stdout', ''), stderr=retvals.get('stderr', ''), diff=retvals.get('diff', ''))
def remove(m, pkgspec, cache, purge=False, force=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False):
pkg_list = []
pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
for package in pkgspec:
name, version = package_split(package)
installed, installed_version, upgradable, has_files = package_status(m, name, version, cache, state='remove')
if installed_version or (has_files and purge):
pkg_list.append("'%s'" % package)
packages = ' '.join(pkg_list)
if not packages:
m.exit_json(changed=False)
else:
if force:
force_yes = '--force-yes'
else:
force_yes = ''
if purge:
purge = '--purge'
else:
purge = ''
if autoremove:
autoremove = '--auto-remove'
else:
autoremove = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
cmd = "%s -q -y %s %s %s %s %s remove %s" % (APT_GET_CMD, dpkg_options, purge, force_yes, autoremove, check_arg, packages)
with PolicyRcD(m):
rc, out, err = m.run_command(cmd)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
if rc:
m.fail_json(msg="'apt-get remove %s' failed: %s" % (packages, err), stdout=out, stderr=err, rc=rc)
m.exit_json(changed=True, stdout=out, stderr=err, diff=diff)
def cleanup(m, purge=False, force=False, operation=None,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS)):
if operation not in frozenset(['autoremove', 'autoclean']):
raise AssertionError('Expected "autoremove" or "autoclean" cleanup operation, got %s' % operation)
if force:
force_yes = '--force-yes'
else:
force_yes = ''
if purge:
purge = '--purge'
else:
purge = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
cmd = "%s -y %s %s %s %s %s" % (APT_GET_CMD, dpkg_options, purge, force_yes, operation, check_arg)
with PolicyRcD(m):
rc, out, err = m.run_command(cmd)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
if rc:
m.fail_json(msg="'apt-get %s' failed: %s" % (operation, err), stdout=out, stderr=err, rc=rc)
changed = CLEAN_OP_CHANGED_STR[operation] in out
m.exit_json(changed=changed, stdout=out, stderr=err, diff=diff)
def upgrade(m, mode="yes", force=False, default_release=None,
use_apt_get=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False, fail_on_autoremove=False,
allow_unauthenticated=False,
allow_downgrade=False,
):
if autoremove:
autoremove = '--auto-remove'
else:
autoremove = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
apt_cmd = None
prompt_regex = None
if mode == "dist" or (mode == "full" and use_apt_get):
# apt-get dist-upgrade
apt_cmd = APT_GET_CMD
upgrade_command = "dist-upgrade %s" % (autoremove)
elif mode == "full" and not use_apt_get:
# aptitude full-upgrade
apt_cmd = APTITUDE_CMD
upgrade_command = "full-upgrade"
else:
if use_apt_get:
apt_cmd = APT_GET_CMD
upgrade_command = "upgrade --with-new-pkgs %s" % (autoremove)
else:
# aptitude safe-upgrade (mode=yes, the default)
apt_cmd = APTITUDE_CMD
upgrade_command = "safe-upgrade"
prompt_regex = r"(^Do you want to ignore this warning and proceed anyway\?|^\*\*\*.*\[default=.*\])"
if force:
if apt_cmd == APT_GET_CMD:
force_yes = '--force-yes'
else:
force_yes = '--assume-yes --allow-untrusted'
else:
force_yes = ''
if fail_on_autoremove:
fail_on_autoremove = '--no-remove'
else:
fail_on_autoremove = ''
allow_unauthenticated = '--allow-unauthenticated' if allow_unauthenticated else ''
allow_downgrade = '--allow-downgrades' if allow_downgrade else ''
if apt_cmd is None:
if use_apt_get:
apt_cmd = APT_GET_CMD
else:
m.fail_json(msg="Unable to find APTITUDE in path. Please make sure "
"to have APTITUDE in path or use 'force_apt_get=True'")
apt_cmd_path = m.get_bin_path(apt_cmd, required=True)
cmd = '%s -y %s %s %s %s %s %s %s' % (
apt_cmd_path,
dpkg_options,
force_yes,
fail_on_autoremove,
allow_unauthenticated,
allow_downgrade,
check_arg,
upgrade_command,
)
if default_release:
cmd += " -t '%s'" % (default_release,)
with PolicyRcD(m):
rc, out, err = m.run_command(cmd, prompt_regex=prompt_regex)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
if rc:
m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out, rc=rc)
if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out):
m.exit_json(changed=False, msg=out, stdout=out, stderr=err)
m.exit_json(changed=True, msg=out, stdout=out, stderr=err, diff=diff)
def get_cache_mtime():
"""Return mtime of a valid apt cache file.
Stat the apt cache file and, if no cache file is found, return 0.
:returns: ``int``
"""
cache_time = 0
if os.path.exists(APT_UPDATE_SUCCESS_STAMP_PATH):
cache_time = os.stat(APT_UPDATE_SUCCESS_STAMP_PATH).st_mtime
elif os.path.exists(APT_LISTS_PATH):
cache_time = os.stat(APT_LISTS_PATH).st_mtime
return cache_time
def get_updated_cache_time():
"""Return the mtime time stamp and the updated cache time.
Always retrieve the mtime of the apt cache or set the `cache_mtime`
variable to 0
:returns: ``tuple``
"""
cache_mtime = get_cache_mtime()
mtimestamp = datetime.datetime.fromtimestamp(cache_mtime)
updated_cache_time = int(time.mktime(mtimestamp.timetuple()))
return mtimestamp, updated_cache_time
# https://github.com/ansible/ansible-modules-core/issues/2951
def get_cache(module):
'''Attempt to get the cache object, updating it until it works or retries are exhausted'''
cache = None
try:
cache = apt.Cache()
except SystemError as e:
if '/var/lib/apt/lists/' in to_native(e).lower():
# update cache until files are fixed or retries exceeded
retries = 0
while retries < 2:
(rc, so, se) = module.run_command(['apt-get', 'update', '-q'])
retries += 1
if rc == 0:
break
if rc != 0:
module.fail_json(msg='Updating the cache to correct corrupt package lists failed:\n%s\n%s' % (to_native(e), so + se), rc=rc)
# try again
cache = apt.Cache()
else:
module.fail_json(msg=to_native(e))
return cache
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'build-dep', 'fixed', 'latest', 'present']),
update_cache=dict(type='bool', aliases=['update-cache']),
update_cache_retries=dict(type='int', default=5),
update_cache_retry_max_delay=dict(type='int', default=12),
cache_valid_time=dict(type='int', default=0),
purge=dict(type='bool', default=False),
package=dict(type='list', elements='str', aliases=['pkg', 'name']),
deb=dict(type='path'),
default_release=dict(type='str', aliases=['default-release']),
install_recommends=dict(type='bool', aliases=['install-recommends']),
force=dict(type='bool', default=False),
upgrade=dict(type='str', choices=['dist', 'full', 'no', 'safe', 'yes'], default='no'),
dpkg_options=dict(type='str', default=DPKG_OPTIONS),
autoremove=dict(type='bool', default=False),
autoclean=dict(type='bool', default=False),
fail_on_autoremove=dict(type='bool', default=False),
policy_rc_d=dict(type='int', default=None),
only_upgrade=dict(type='bool', default=False),
force_apt_get=dict(type='bool', default=False),
allow_unauthenticated=dict(type='bool', default=False, aliases=['allow-unauthenticated']),
allow_downgrade=dict(type='bool', default=False, aliases=['allow-downgrade', 'allow_downgrades', 'allow-downgrades']),
allow_change_held_packages=dict(type='bool', default=False),
lock_timeout=dict(type='int', default=60),
),
mutually_exclusive=[['deb', 'package', 'upgrade']],
required_one_of=[['autoremove', 'deb', 'package', 'update_cache', 'upgrade']],
supports_check_mode=True,
)
# We screenscrape apt-get and aptitude output for information, so we need
# to make sure we use the best parsable locale when running commands.
# We also set apt-specific environment variables for the desired behaviour.
locale = get_best_parsable_locale(module)
# APT related constants
APT_ENV_VARS = dict(
DEBIAN_FRONTEND='noninteractive',
DEBIAN_PRIORITY='critical',
LANG=locale,
LC_ALL=locale,
LC_MESSAGES=locale,
LC_CTYPE=locale,
)
module.run_command_environ_update = APT_ENV_VARS
if not HAS_PYTHON_APT:
# This interpreter can't see the apt Python library- we'll do the following to try and fix that:
# 1) look in common locations for system-owned interpreters that can see it; if we find one, respawn under it
# 2) finding none, try to install a matching python-apt package for the current interpreter version;
# we limit to the current interpreter version to try and avoid installing a whole other Python just
# for apt support
# 3) if we installed a support package, try to respawn under what we think is the right interpreter (could be
# the current interpreter again, but we'll let it respawn anyway for simplicity)
# 4) if still not working, return an error and give up (some corner cases not covered, but this shouldn't be
# made any more complex than it already is to try and cover more, eg, custom interpreters taking over
# system locations)
apt_pkg_name = 'python3-apt' if PY3 else 'python-apt'
if has_respawned():
# this shouldn't be possible; short-circuit early if it happens...
module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
interpreters = ['/usr/bin/python3', '/usr/bin/python2', '/usr/bin/python']
interpreter = probe_interpreters_for_module(interpreters, 'apt')
if interpreter:
# found the Python bindings; respawn this module under the interpreter where we found them
respawn_module(interpreter)
# this is the end of the line for this process, it will exit here once the respawned module has completed
# don't make changes if we're in check_mode
if module.check_mode:
module.fail_json(msg="%s must be installed to use check mode. "
"If run normally this module can auto-install it." % apt_pkg_name)
# We skip the cache update when auto-installing the dependency if the
# user explicitly disabled it with update_cache=no.
if module.params.get('update_cache') is False:
module.warn("Auto-installing missing dependency without updating cache: %s" % apt_pkg_name)
else:
module.warn("Updating cache and auto-installing missing dependency: %s" % apt_pkg_name)
module.run_command(['apt-get', 'update'], check_rc=True)
# try to install the apt Python binding
module.run_command(['apt-get', 'install', '--no-install-recommends', apt_pkg_name, '-y', '-q'], check_rc=True)
# try again to find the bindings in common places
interpreter = probe_interpreters_for_module(interpreters, 'apt')
if interpreter:
# found the Python bindings; respawn this module under the interpreter where we found them
# NB: respawn is somewhat wasteful if it's this interpreter, but simplifies the code
respawn_module(interpreter)
# this is the end of the line for this process, it will exit here once the respawned module has completed
else:
# we've done all we can do; just tell the user it's busted and get out
module.fail_json(msg="{0} must be installed and visible from {1}.".format(apt_pkg_name, sys.executable))
global APTITUDE_CMD
APTITUDE_CMD = module.get_bin_path("aptitude", False)
global APT_GET_CMD
APT_GET_CMD = module.get_bin_path("apt-get")
p = module.params
if p['upgrade'] == 'no':
p['upgrade'] = None
use_apt_get = p['force_apt_get']
if not use_apt_get and not APTITUDE_CMD:
use_apt_get = True
updated_cache = False
updated_cache_time = 0
install_recommends = p['install_recommends']
allow_unauthenticated = p['allow_unauthenticated']
allow_downgrade = p['allow_downgrade']
allow_change_held_packages = p['allow_change_held_packages']
dpkg_options = expand_dpkg_options(p['dpkg_options'])
autoremove = p['autoremove']
fail_on_autoremove = p['fail_on_autoremove']
autoclean = p['autoclean']
# deadline after which we stop retrying on apt lock contention
deadline = time.time() + p['lock_timeout']
# keep retrying on lock issues until the deadline passes or the operation succeeds.
while True:
# Get the cache object, this has 3 retries built in
cache = get_cache(module)
try:
if p['default_release']:
try:
apt_pkg.config['APT::Default-Release'] = p['default_release']
except AttributeError:
apt_pkg.Config['APT::Default-Release'] = p['default_release']
# reopen cache w/ modified config
cache.open(progress=None)
mtimestamp, updated_cache_time = get_updated_cache_time()
# cache_valid_time defaults to 0, so the cache is updated whenever it
# is stale and `update_cache` was set to true
updated_cache = False
if p['update_cache'] or p['cache_valid_time']:
now = datetime.datetime.now()
tdelta = datetime.timedelta(seconds=p['cache_valid_time'])
if not mtimestamp + tdelta >= now:
# Retry to update the cache with exponential backoff
err = ''
update_cache_retries = module.params.get('update_cache_retries')
update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay')
randomize = random.randint(0, 1000) / 1000.0
for retry in range(update_cache_retries):
try:
cache.update()
break
except apt.cache.FetchFailedException as e:
err = to_native(e)
# Use exponential backoff plus a little bit of randomness
delay = 2 ** retry + randomize
if delay > update_cache_retry_max_delay:
delay = update_cache_retry_max_delay + randomize
time.sleep(delay)
else:
module.fail_json(msg='Failed to update apt cache: %s' % (err if err else 'unknown reason'))
cache.open(progress=None)
mtimestamp, post_cache_update_time = get_updated_cache_time()
if updated_cache_time != post_cache_update_time:
updated_cache = True
updated_cache_time = post_cache_update_time
# If there is nothing else to do, exit. This sets changed based on
# whether the cache was updated.
if not p['package'] and not p['upgrade'] and not p['deb']:
module.exit_json(
changed=updated_cache,
cache_updated=updated_cache,
cache_update_time=updated_cache_time
)
force_yes = p['force']
if p['upgrade']:
upgrade(
module,
p['upgrade'],
force_yes,
p['default_release'],
use_apt_get,
dpkg_options,
autoremove,
fail_on_autoremove,
allow_unauthenticated,
allow_downgrade
)
if p['deb']:
if p['state'] != 'present':
module.fail_json(msg="deb only supports state=present")
if '://' in p['deb']:
p['deb'] = fetch_file(module, p['deb'])
install_deb(module, p['deb'], cache,
install_recommends=install_recommends,
allow_unauthenticated=allow_unauthenticated,
allow_change_held_packages=allow_change_held_packages,
allow_downgrade=allow_downgrade,
force=force_yes, fail_on_autoremove=fail_on_autoremove, dpkg_options=p['dpkg_options'])
unfiltered_packages = p['package'] or ()
packages = [package.strip() for package in unfiltered_packages if package != '*']
all_installed = '*' in unfiltered_packages
latest = p['state'] == 'latest'
if latest and all_installed:
if packages:
module.fail_json(msg='unable to install additional packages when upgrading all installed packages')
upgrade(
module,
'yes',
force_yes,
p['default_release'],
use_apt_get,
dpkg_options,
autoremove,
fail_on_autoremove,
allow_unauthenticated,
allow_downgrade
)
if packages:
for package in packages:
if package.count('=') > 1:
module.fail_json(msg="invalid package spec: %s" % package)
if latest and '=' in package:
module.fail_json(msg='version number inconsistent with state=latest: %s' % package)
if not packages:
if autoclean:
cleanup(module, p['purge'], force=force_yes, operation='autoclean', dpkg_options=dpkg_options)
if autoremove:
cleanup(module, p['purge'], force=force_yes, operation='autoremove', dpkg_options=dpkg_options)
if p['state'] in ('latest', 'present', 'build-dep', 'fixed'):
state_upgrade = False
state_builddep = False
state_fixed = False
if p['state'] == 'latest':
state_upgrade = True
if p['state'] == 'build-dep':
state_builddep = True
if p['state'] == 'fixed':
state_fixed = True
success, retvals = install(
module,
packages,
cache,
upgrade=state_upgrade,
default_release=p['default_release'],
install_recommends=install_recommends,
force=force_yes,
dpkg_options=dpkg_options,
build_dep=state_builddep,
fixed=state_fixed,
autoremove=autoremove,
fail_on_autoremove=fail_on_autoremove,
only_upgrade=p['only_upgrade'],
allow_unauthenticated=allow_unauthenticated,
allow_downgrade=allow_downgrade,
allow_change_held_packages=allow_change_held_packages,
)
# Store if the cache has been updated
retvals['cache_updated'] = updated_cache
# Store when the update time was last
retvals['cache_update_time'] = updated_cache_time
if success:
module.exit_json(**retvals)
else:
module.fail_json(**retvals)
elif p['state'] == 'absent':
remove(module, packages, cache, p['purge'], force=force_yes, dpkg_options=dpkg_options, autoremove=autoremove)
except apt.cache.LockFailedException as lockFailedException:
if time.time() < deadline:
continue
module.fail_json(msg="Failed to lock apt for exclusive operation: %s" % lockFailedException)
except apt.cache.FetchFailedException as fetchFailedException:
module.fail_json(msg="Could not fetch updated apt files: %s" % fetchFailedException)
# reached without an exception or an exit; this should be impossible
module.fail_json(msg='Unexpected code path taken, we really should have exited before, this is a bug')
if __name__ == "__main__":
main()
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 69,034 |
Apt module to support minimum version specification ">="
|
<!--- Verify first that your feature was not already discussed on GitHub -->
<!--- Complete *all* sections as described, this form is processed automatically -->
It's a reopening of the closed issue #21449.
##### SUMMARY
<!--- Describe the new feature/improvement briefly below -->
I would like to be able to specify the minimum version of a package to be installed instead of a (partial) exact match.
For example, I would like to have elasticsearch present on my system in a version equal to or greater than 7.6.1, because feature X has been present since that version:
- if elasticsearch is not installed: install the latest elasticsearch package
- if elasticsearch is installed in version 7.6.0: install the latest elasticsearch package (7.6.2)
- if elasticsearch is installed in version 7.6.1: do nothing, return ok
- if elasticsearch is installed in version 7.6.2 or later: do nothing, return ok
```yml
- name: Install Elasticsearch.
package:
name: elasticsearch>=7.6.1
state: present
```
##### ACTUAL RESULTS
```
fatal: [poc-elasticsearch]: FAILED! => {"changed": false, "msg": "No package matching 'elasticsearch>' is available"}
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
[apt module](https://docs.ansible.com/ansible/latest/modules/apt_module.html)
##### ADDITIONAL INFORMATION
<!--- Describe how the feature would be used, why it is needed and what it would solve -->
Answer to https://github.com/ansible/ansible/issues/21449#issuecomment-280158405
> Does apt itself support these? If not, I'm not certain we want to add it as that would require us to parse and compare dpkg version strings. If it does, then I could see us taking a PR to use the apt python bindings to do this... Someone would have to reconcile this with the current usage of the "=" to mean fnmatching, though.
The apt Python bindings support comparing versions with `apt_pkg.version_compare`:
<!--- HINT: You can also paste gist.github.com links for larger files -->
```py
import apt
import apt_pkg
cache = apt.Cache()
pkg = cache['elasticsearch']
installed = pkg.installed.version
candidate = pkg.versions[0].version
vc = apt_pkg.version_compare(installed, candidate)
if vc > 0:
print(f'{installed} version > {candidate} version')
elif vc == 0:
print(f'{installed} version = {candidate} version')
elif vc < 0 and pkg.is_upgradable:
print(f'{installed} version < {candidate} version')
```
Documentation: https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html?highlight=version_compare#apt_pkg.version_compare
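A rough sketch (my own illustration, assuming the python-apt bindings; `satisfies_minimum` is a hypothetical name, not part of the apt module) of how a `>=` spec could be split and checked:
```py
# Illustration only: satisfies_minimum is a hypothetical helper, not part of
# the apt module. Requires the python-apt bindings; constructing apt.Cache()
# ensures apt_pkg is initialised before version_compare is called.
import apt
import apt_pkg

def satisfies_minimum(cache, name, min_version):
    """Return True if the installed version of `name` is >= min_version."""
    try:
        pkg = cache[name]
    except KeyError:
        return False  # unknown package cannot satisfy the spec
    if not pkg.is_installed:
        return False
    # version_compare returns <0, 0 or >0, like strcmp
    return apt_pkg.version_compare(pkg.installed.version, min_version) >= 0

spec = "elasticsearch>=7.6.1"
name, _, min_version = spec.partition(">=")
if not satisfies_minimum(apt.Cache(), name, min_version):
    print("%s must be installed or upgraded to satisfy >= %s" % (name, min_version))
```
Reconciling such an operator with the existing `=` fnmatch handling in `package_split` and `expand_pkgspec_from_fnmatches` would still be the open design question raised above.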
|
https://github.com/ansible/ansible/issues/69034
|
https://github.com/ansible/ansible/pull/75002
|
a5bea8b2f51ba9d9c001f713ee7e658cf1a96385
|
4a62c4e3e44b01a904aa86e9b87206a24bd41fbc
| 2020-04-20T07:29:38Z |
python
| 2022-01-11T14:41:12Z |
test/integration/targets/apt/tasks/repo.yml
|
- block:
- name: Install foo package version 1.0.0
apt:
name: foo=1.0.0
allow_unauthenticated: yes
register: apt_result
- name: Check install with dpkg
shell: dpkg-query -l foo
register: dpkg_result
- name: Check if install was successful
assert:
that:
- "apt_result is success"
- "dpkg_result is success"
- "'1.0.0' in dpkg_result.stdout"
- name: Update to foo version 1.0.1
apt:
name: foo
state: latest
allow_unauthenticated: yes
register: apt_result
- name: Check install with dpkg
shell: dpkg-query -l foo
register: dpkg_result
- name: Check if install was successful
assert:
that:
- "apt_result is success"
- "dpkg_result is success"
- "'1.0.1' in dpkg_result.stdout"
always:
- name: Clean up
apt:
name: foo
state: absent
allow_unauthenticated: yes
# https://github.com/ansible/ansible/issues/30638
- block:
- name: Fail to install foo=1.0.1 since foo is not installed and only_upgrade is set
apt:
name: foo=1.0.1
state: present
only_upgrade: yes
allow_unauthenticated: yes
ignore_errors: yes
register: apt_result
- name: Check that foo was not upgraded
assert:
that:
- "apt_result is not changed"
- apt:
name: foo=1.0.0
allow_unauthenticated: yes
- name: Upgrade foo to 1.0.1
apt:
name: foo=1.0.1
state: present
only_upgrade: yes
allow_unauthenticated: yes
register: apt_result
- name: Check install with dpkg
shell: dpkg-query -l foo
register: dpkg_result
- name: Check if install was successful
assert:
that:
- "apt_result is success"
- "dpkg_result is success"
- "'1.0.1' in dpkg_result.stdout"
always:
- name: Clean up
apt:
name: foo
state: absent
allow_unauthenticated: yes
# https://github.com/ansible/ansible/issues/35900
- block:
- name: Disable ubuntu repos so system packages are not upgraded and do not change testing env
command: mv /etc/apt/sources.list /etc/apt/sources.list.backup
- name: Install foobar, installs foo as a dependency
apt:
name: foobar=1.0.0
allow_unauthenticated: yes
- name: Upgrade foobar to a version which does not depend on foo, autoremove should remove foo
apt:
upgrade: dist
autoremove: yes
allow_unauthenticated: yes
- name: Check foo with dpkg
shell: dpkg-query -l foo
register: dpkg_result
ignore_errors: yes
- name: Check that foo was removed by autoremove
assert:
that:
- "dpkg_result is failed"
always:
- name: Clean up
apt:
pkg: foo,foobar
state: absent
autoclean: yes
- name: Restore ubuntu repos
command: mv /etc/apt/sources.list.backup /etc/apt/sources.list
# https://github.com/ansible/ansible/issues/26298
- block:
- name: Disable ubuntu repos so system packages are not upgraded and do not change testing env
command: mv /etc/apt/sources.list /etc/apt/sources.list.backup
- name: Install foobar, installs foo as a dependency
apt:
name: foobar=1.0.0
allow_unauthenticated: yes
- name: Upgrade foobar to a version which does not depend on foo
apt:
upgrade: dist
force: yes # workaround for --allow-unauthenticated used along with upgrade
- name: autoremove should remove foo
apt:
autoremove: yes
register: autoremove_result
- name: Check that autoremove correctly reports changed=True
assert:
that:
- "autoremove_result is changed"
- name: Check foo with dpkg
shell: dpkg-query -l foo
register: dpkg_result
ignore_errors: yes
- name: Check that foo was removed by autoremove
assert:
that:
- "dpkg_result is failed"
- name: Nothing to autoremove
apt:
autoremove: yes
register: autoremove_result
- name: Check that autoremove correctly reports changed=False
assert:
that:
- "autoremove_result is not changed"
- name: Create a fake .deb file for autoclean to remove
file:
name: /var/cache/apt/archives/python3-q_2.4-1_all.deb
state: touch
- name: autoclean fake .deb file
apt:
autoclean: yes
register: autoclean_result
- name: Check if the .deb file exists
stat:
path: /var/cache/apt/archives/python3-q_2.4-1_all.deb
register: stat_result
- name: Check that autoclean correctly reports changed=True and file was removed
assert:
that:
- "autoclean_result is changed"
- "not stat_result.stat.exists"
- name: Nothing to autoclean
apt:
autoclean: yes
register: autoclean_result
- name: Check that autoclean correctly reports changed=False
assert:
that:
- "autoclean_result is not changed"
always:
- name: Clean up
apt:
pkg: foo,foobar
state: absent
autoclean: yes
- name: Restore ubuntu repos
command: mv /etc/apt/sources.list.backup /etc/apt/sources.list
- name: Downgrades
import_tasks: "downgrade.yml"
- name: Upgrades
block:
- import_tasks: "upgrade.yml"
vars:
aptitude_present: "{{ True | bool }}"
upgrade_type: "dist"
force_apt_get: "{{ False | bool }}"
- name: Check if aptitude is installed
command: dpkg-query --show --showformat='${db:Status-Abbrev}' aptitude
register: aptitude_status
- name: Remove aptitude, if installed, to test fall-back to apt-get
apt:
pkg: aptitude
state: absent
when:
- aptitude_status.stdout.find('ii') != -1
- include_tasks: "upgrade.yml"
vars:
aptitude_present: "{{ False | bool }}"
upgrade_type: "{{ item.upgrade_type }}"
force_apt_get: "{{ item.force_apt_get }}"
with_items:
- { upgrade_type: safe, force_apt_get: False }
- { upgrade_type: full, force_apt_get: False }
- { upgrade_type: safe, force_apt_get: True }
- { upgrade_type: full, force_apt_get: True }
- name: (Re-)Install aptitude, run same tests again
apt:
pkg: aptitude
state: present
- include_tasks: "upgrade.yml"
vars:
aptitude_present: "{{ True | bool }}"
upgrade_type: "{{ item.upgrade_type }}"
force_apt_get: "{{ item.force_apt_get }}"
with_items:
- { upgrade_type: safe, force_apt_get: False }
- { upgrade_type: full, force_apt_get: False }
- { upgrade_type: safe, force_apt_get: True }
- { upgrade_type: full, force_apt_get: True }
- name: Remove aptitude if not originally present
apt:
pkg: aptitude
state: absent
when:
- aptitude_status.stdout.find('ii') == -1
- block:
- name: Install the foo package with diff=yes
apt:
name: foo
allow_unauthenticated: yes
diff: yes
register: apt_result
- name: Check the content of diff.prepared
assert:
that:
- apt_result is success
- "'The following NEW packages will be installed:\n foo' in apt_result.diff.prepared"
always:
- name: Clean up
apt:
name: foo
state: absent
allow_unauthenticated: yes
- block:
- name: Install foo package version 1.0.0 with force=yes, implies allow_unauthenticated=yes
apt:
name: foo=1.0.0
force: yes
register: apt_result
- name: Check install with dpkg
shell: dpkg-query -l foo
register: dpkg_result
- name: Check if install was successful
assert:
that:
- "apt_result is success"
- "dpkg_result is success"
- "'1.0.0' in dpkg_result.stdout"
always:
- name: Clean up
apt:
name: foo
state: absent
allow_unauthenticated: yes
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 69,034 |
Apt module to support minimum version specification ">="
|
<!--- Verify first that your feature was not already discussed on GitHub -->
<!--- Complete *all* sections as described, this form is processed automatically -->
It's a reopening of the closed issue #21449.
##### SUMMARY
<!--- Describe the new feature/improvement briefly below -->
I would like to be able to specify the minimum version of a package to be installed instead of a (partial) exact match.
For example, I would like to have elasticsearch present on my system in a version equal to or greater than 7.6.1, because feature X has been present since that version:
- if elasticsearch is not installed: install the latest elasticsearch package
- if elasticsearch is installed in version 7.6.0: install the latest elasticsearch package (7.6.2)
- if elasticsearch is installed in version 7.6.1: do nothing, return ok
- if elasticsearch is installed in version 7.6.2 or later: do nothing, return ok
```yml
- name: Install Elasticsearch.
package:
name: elasticsearch>=7.6.1
state: present
```
##### ACTUAL RESULTS
```
fatal: [poc-elasticsearch]: FAILED! => {"changed": false, "msg": "No package matching 'elasticsearch>' is available"}
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
[apt module](https://docs.ansible.com/ansible/latest/modules/apt_module.html)
##### ADDITIONAL INFORMATION
<!--- Describe how the feature would be used, why it is needed and what it would solve -->
Answer to https://github.com/ansible/ansible/issues/21449#issuecomment-280158405
> Does apt itself support these? If not, I'm not certain we want to add it as that would require us to parse and compare dpkg version strings. If it does, then I could see us taking a PR to use the apt python bindings to do this... Someone would have to reconcile this with the current usage of the "=" to mean fnmatching, though.
The apt Python bindings support comparing versions with `apt_pkg.version_compare`:
<!--- HINT: You can also paste gist.github.com links for larger files -->
```py
import apt
import apt_pkg
cache = apt.Cache()
pkg = cache['elasticsearch']
installed = pkg.installed.version
candidate = pkg.versions[0].version
vc = apt_pkg.version_compare(installed, candidate)
if vc > 0:
print(f'{installed} version > {candidate} version')
elif vc == 0:
print(f'{installed} version = {candidate} version')
elif vc < 0 and pkg.is_upgradable:
print(f'{installed} version < {candidate} version')
```
Documentation: https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html?highlight=version_compare#apt_pkg.version_compare
|
https://github.com/ansible/ansible/issues/69034
|
https://github.com/ansible/ansible/pull/75002
|
a5bea8b2f51ba9d9c001f713ee7e658cf1a96385
|
4a62c4e3e44b01a904aa86e9b87206a24bd41fbc
| 2020-04-20T07:29:38Z |
python
| 2022-01-11T14:41:12Z |
test/integration/targets/setup_deb_repo/files/package_specs/stable/foo-1.0.0
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 69,034 |
Apt module to support minimum version specification ">="
|
<!--- Verify first that your feature was not already discussed on GitHub -->
<!--- Complete *all* sections as described, this form is processed automatically -->
It's a reopening of the closed issue #21449.
##### SUMMARY
<!--- Describe the new feature/improvement briefly below -->
I would like to be able to specify the minimum version of a package to be installed instead of a (partial) exact match.
For example, I would like to have elasticsearch present on my system in a version equal to or greater than 7.6.1, because feature X has been present since that version:
- if elasticsearch is not installed: install the latest elasticsearch package
- if elasticsearch is installed in version 7.6.0: install the latest elasticsearch package (7.6.2)
- if elasticsearch is installed in version 7.6.1: do nothing, return ok
- if elasticsearch is installed in version 7.6.2 or later: do nothing, return ok
```yml
- name: Install Elasticsearch.
package:
name: elasticsearch>=7.6.1
state: present
```
##### ACTUAL RESULTS
```
fatal: [poc-elasticsearch]: FAILED! => {"changed": false, "msg": "No package matching 'elasticsearch>' is available"}
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
[apt module](https://docs.ansible.com/ansible/latest/modules/apt_module.html)
##### ADDITIONAL INFORMATION
<!--- Describe how the feature would be used, why it is needed and what it would solve -->
Answer to https://github.com/ansible/ansible/issues/21449#issuecomment-280158405
> Does apt itself support these? If not, I'm not certain we want to add it as that would require us to parse and compare dpkg version strings. If it does, then I could see us taking a PR to use the apt python bindings to do this... Someone would have to reconcile this with the current usage of the "=" to mean fnmatching, though.
The apt Python bindings support comparing versions with `apt_pkg.version_compare`:
<!--- HINT: You can also paste gist.github.com links for larger files -->
```py
import apt
import apt_pkg
cache = apt.Cache()
pkg = cache['elasticsearch']
installed = pkg.installed.version
candidate = pkg.versions[0].version
vc = apt_pkg.version_compare(installed, candidate)
if vc > 0:
print(f'{installed} version > {candidate} version')
elif vc == 0:
print(f'{installed} version = {candidate} version')
elif vc < 0 and pkg.is_upgradable:
print(f'{installed} version < {candidate} version')
```
Documentation: https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html?highlight=version_compare#apt_pkg.version_compare
|
https://github.com/ansible/ansible/issues/69034
|
https://github.com/ansible/ansible/pull/75002
|
a5bea8b2f51ba9d9c001f713ee7e658cf1a96385
|
4a62c4e3e44b01a904aa86e9b87206a24bd41fbc
| 2020-04-20T07:29:38Z |
python
| 2022-01-11T14:41:12Z |
test/integration/targets/setup_deb_repo/files/package_specs/stable/foo-1.0.1
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 69,034 |
Apt module to support minimum version specification ">="
|
<!--- Verify first that your feature was not already discussed on GitHub -->
<!--- Complete *all* sections as described, this form is processed automatically -->
It's a reopening of the closed issue #21449.
##### SUMMARY
<!--- Describe the new feature/improvement briefly below -->
I would like to be able to specify the minimum version of a package to be installed instead of a (partial) exact match.
For example, I would like to have elasticsearch present on my system in a version equal to or greater than 7.6.1, because feature X has been present since that version:
- if elasticsearch is not installed: install the latest elasticsearch package
- if elasticsearch is installed in version 7.6.0: install the latest elasticsearch package (7.6.2)
- if elasticsearch is installed in version 7.6.1: do nothing, return ok
- if elasticsearch is installed in version 7.6.2 or later: do nothing, return ok
```yml
- name: Install Elasticsearch.
package:
name: elasticsearch>=7.6.1
state: present
```
##### ACTUAL RESULTS
```
fatal: [poc-elasticsearch]: FAILED! => {"changed": false, "msg": "No package matching 'elasticsearch>' is available"}
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
[apt module](https://docs.ansible.com/ansible/latest/modules/apt_module.html)
##### ADDITIONAL INFORMATION
<!--- Describe how the feature would be used, why it is needed and what it would solve -->
Answer to https://github.com/ansible/ansible/issues/21449#issuecomment-280158405
> Does apt itself support these? If not, I'm not certain we want to add it as that would require us to parse and compare dpkg version strings. If it does, then I could see us taking a PR to use the apt python bindings to do this... Someone would have to reconcile this with the current usage of the "=" to mean fnmatching, though.
The apt Python bindings support comparing versions with `apt_pkg.version_compare`:
<!--- HINT: You can also paste gist.github.com links for larger files -->
```py
import apt
import apt_pkg
cache = apt.Cache()
pkg = cache['elasticsearch']
installed = pkg.installed.version
candidate = pkg.versions[0].version
vc = apt_pkg.version_compare(installed, candidate)
if vc > 0:
print(f'{installed} version > {candidate} version')
elif vc == 0:
print(f'{installed} version = {candidate} version')
elif vc < 0 and pkg.is_upgradable:
print(f'{installed} version < {candidate} version')
```
Documentation: https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html?highlight=version_compare#apt_pkg.version_compare
|
https://github.com/ansible/ansible/issues/69034
|
https://github.com/ansible/ansible/pull/75002
|
a5bea8b2f51ba9d9c001f713ee7e658cf1a96385
|
4a62c4e3e44b01a904aa86e9b87206a24bd41fbc
| 2020-04-20T07:29:38Z |
python
| 2022-01-11T14:41:12Z |
test/integration/targets/setup_deb_repo/files/package_specs/stable/foobar-1.0.0
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 69,034 |
Apt module to support minimum version specification ">="
|
<!--- Verify first that your feature was not already discussed on GitHub -->
<!--- Complete *all* sections as described, this form is processed automatically -->
It's a reopening of the closed issue #21449.
##### SUMMARY
<!--- Describe the new feature/improvement briefly below -->
I would like to be able to specify the minimum version of a package to be installed instead of a (partial) exact match.
For example, I would like to have elasticsearch present on my system in a version equal to or greater than 7.6.1, because feature X has been present since that version:
- if elasticsearch is not installed: install the latest elasticsearch package
- if elasticsearch is installed in version 7.6.0: install the latest elasticsearch package (7.6.2)
- if elasticsearch is installed in version 7.6.1: do nothing, return ok
- if elasticsearch is installed in version 7.6.2 or later: do nothing, return ok
```yml
- name: Install Elasticsearch.
package:
name: elasticsearch>=7.6.1
state: present
```
##### ACTUAL RESULTS
```
fatal: [poc-elasticsearch]: FAILED! => {"changed": false, "msg": "No package matching 'elasticsearch>' is available"}
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
[apt module](https://docs.ansible.com/ansible/latest/modules/apt_module.html)
##### ADDITIONAL INFORMATION
<!--- Describe how the feature would be used, why it is needed and what it would solve -->
Answer to https://github.com/ansible/ansible/issues/21449#issuecomment-280158405
> Does apt itself support these? If not, I'm not certain we want to add it as that would require us to parse and compare dpkg version strings. If it does, then I could see us taking a PR to use the apt python bindings to do this... Someone would have to reconcile this with the current usage of the "=" to mean fnmatching, though.
The apt Python bindings support comparing versions with `apt_pkg.version_compare`:
<!--- HINT: You can also paste gist.github.com links for larger files -->
```py
import apt
import apt_pkg
cache = apt.Cache()
pkg = cache['elasticsearch']
installed = pkg.installed.version
candidate = pkg.versions[0].version
vc = apt_pkg.version_compare(installed, candidate)
if vc > 0:
print(f'{installed} version > {candidate} version')
elif vc == 0:
print(f'{installed} version = {candidate} version')
elif vc < 0 and pkg.is_upgradable:
print(f'{installed} version < {candidate} version')
```
Documentation: https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html?highlight=version_compare#apt_pkg.version_compare
|
https://github.com/ansible/ansible/issues/69034
|
https://github.com/ansible/ansible/pull/75002
|
a5bea8b2f51ba9d9c001f713ee7e658cf1a96385
|
4a62c4e3e44b01a904aa86e9b87206a24bd41fbc
| 2020-04-20T07:29:38Z |
python
| 2022-01-11T14:41:12Z |
test/integration/targets/setup_deb_repo/files/package_specs/stable/foobar-1.0.1
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 69,034 |
Apt module to support minimum version specification ">="
|
<!--- Verify first that your feature was not already discussed on GitHub -->
<!--- Complete *all* sections as described, this form is processed automatically -->
It's a reopening of the closed issue #21449.
##### SUMMARY
<!--- Describe the new feature/improvement briefly below -->
I would like to be able to specify the minimum version of a package to be installed instead of a (partial) exact match.
For example, I would like to have elasticsearch present on my system in a version equal to or greater than 7.6.1, because feature X has been present since that version:
- if elasticsearch is not installed: install the latest elasticsearch package
- if elasticsearch is installed in version 7.6.0: install the latest elasticsearch package (7.6.2)
- if elasticsearch is installed in version 7.6.1: do nothing, return ok
- if elasticsearch is installed in version 7.6.2 or later: do nothing, return ok
```yml
- name: Install Elasticsearch.
package:
name: elasticsearch>=7.6.1
state: present
```
##### ACTUAL RESULTS
```
fatal: [poc-elasticsearch]: FAILED! => {"changed": false, "msg": "No package matching 'elasticsearch>' is available"}
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
[apt module](https://docs.ansible.com/ansible/latest/modules/apt_module.html)
##### ADDITIONAL INFORMATION
<!--- Describe how the feature would be used, why it is needed and what it would solve -->
Answer to https://github.com/ansible/ansible/issues/21449#issuecomment-280158405
> Does apt itself support these? If not, I'm not certain we want to add it as that would require us to parse and compare dpkg version strings. If it does, then I could see us taking a PR to use the apt python bindings to do this... Someone would have to reconcile this with the current usage of the "=" to mean fnmatching, though.
The apt Python bindings support comparing versions with `apt_pkg.version_compare`:
<!--- HINT: You can also paste gist.github.com links for larger files -->
```py
import apt
import apt_pkg
cache = apt.Cache()
pkg = cache['elasticsearch']
installed = pkg.installed.version
candidate = pkg.versions[0].version
vc = apt_pkg.version_compare(installed, candidate)
if vc > 0:
print(f'{installed} version > {candidate} version')
elif vc == 0:
print(f'{installed} version = {candidate} version')
elif vc < 0 and pkg.is_upgradable:
print(f'{installed} version < {candidate} version')
```
Documentation: https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html?highlight=version_compare#apt_pkg.version_compare
|
https://github.com/ansible/ansible/issues/69034
|
https://github.com/ansible/ansible/pull/75002
|
a5bea8b2f51ba9d9c001f713ee7e658cf1a96385
|
4a62c4e3e44b01a904aa86e9b87206a24bd41fbc
| 2020-04-20T07:29:38Z |
python
| 2022-01-11T14:41:12Z |
test/integration/targets/setup_deb_repo/files/package_specs/testing/foo-2.0.0
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 69,034 |
Apt module to support minimum version specification ">="
|
<!--- Verify first that your feature was not already discussed on GitHub -->
<!--- Complete *all* sections as described, this form is processed automatically -->
It's a reopening of the closed issue #21449.
##### SUMMARY
<!--- Describe the new feature/improvement briefly below -->
I would like to be able to specify the minimum version of a package to be installed instead of a (partial) exact match.
For example, I would like to have elasticsearch present on my system in a version equal to or greater than 7.6.1, because feature X has been present since that version:
- if elasticsearch is not installed: install the latest elasticsearch package
- if elasticsearch is installed in version 7.6.0: install the latest elasticsearch package (7.6.2)
- if elasticsearch is installed in version 7.6.1: do nothing, return ok
- if elasticsearch is installed in version 7.6.2 or later: do nothing, return ok
```yml
- name: Install Elasticsearch.
package:
name: elasticsearch>=7.6.1
state: present
```
##### ACTUAL RESULTS
```
fatal: [poc-elasticsearch]: FAILED! => {"changed": false, "msg": "No package matching 'elasticsearch>' is available"}
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
[apt module](https://docs.ansible.com/ansible/latest/modules/apt_module.html)
##### ADDITIONAL INFORMATION
<!--- Describe how the feature would be used, why it is needed and what it would solve -->
Answer to https://github.com/ansible/ansible/issues/21449#issuecomment-280158405
> Does apt itself support these? If not, I'm not certain we want to add it as that would require us to parse and compare dpkg version strings. If it does, then I could see us taking a PR to use the apt python bindings to do this... Someone would have to reconcile this with the current usage of the "=" to mean fnmatching, though.
The apt Python bindings support comparing versions with `apt_pkg.version_compare`:
<!--- HINT: You can also paste gist.github.com links for larger files -->
```py
import apt
import apt_pkg
cache = apt.Cache()
pkg = cache['elasticsearch']
installed = pkg.installed.version
candidate = pkg.versions[0].version
vc = apt_pkg.version_compare(installed, candidate)
if vc > 0:
print(f'{installed} version > {candidate} version')
elif vc == 0:
print(f'{installed} version = {candidate} version')
elif vc < 0 and pkg.is_upgradable:
print(f'{installed} version < {candidate} version')
```
Documentation: https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html?highlight=version_compare#apt_pkg.version_compare
|
https://github.com/ansible/ansible/issues/69034
|
https://github.com/ansible/ansible/pull/75002
|
a5bea8b2f51ba9d9c001f713ee7e658cf1a96385
|
4a62c4e3e44b01a904aa86e9b87206a24bd41fbc
| 2020-04-20T07:29:38Z |
python
| 2022-01-11T14:41:12Z |
test/integration/targets/setup_deb_repo/files/package_specs/testing/foo-2.0.1
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 69,034 |
Apt module to support minimum version specification ">="
|
<!--- Verify first that your feature was not already discussed on GitHub -->
<!--- Complete *all* sections as described, this form is processed automatically -->
It's a reopening of the closed issue #21449.
##### SUMMARY
<!--- Describe the new feature/improvement briefly below -->
I would like to be able to specify the minimum version of a package to be installed instead of a (partial) exact match.
For example, I would like to have elasticsearch present on my system in a version equal to or greater than 7.6.1, because feature X has been present since that version:
- if elasticsearch is not installed: install the latest elasticsearch package
- if elasticsearch is installed in version 7.6.0: install the latest elasticsearch package (7.6.2)
- if elasticsearch is installed in version 7.6.1: do nothing, return ok
- if elasticsearch is installed in version 7.6.2 or later: do nothing, return ok
```yml
- name: Install Elasticsearch.
package:
name: elasticsearch>=7.6.1
state: present
```
##### ACTUAL RESULTS
```
fatal: [poc-elasticsearch]: FAILED! => {"changed": false, "msg": "No package matching 'elasticsearch>' is available"}
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
[apt module](https://docs.ansible.com/ansible/latest/modules/apt_module.html)
##### ADDITIONAL INFORMATION
<!--- Describe how the feature would be used, why it is needed and what it would solve -->
Answer to https://github.com/ansible/ansible/issues/21449#issuecomment-280158405
> Does apt itself support these? If not, I'm not certain we want to add it as that would require us to parse and compare dpkg version strings. If it does, then I could see us taking a PR to use the apt python bindings to do this... Someone would have to reconcile this with the current usage of the "=" to mean fnmatching, though.
The apt Python bindings support comparing versions with `apt_pkg.version_compare`:
<!--- HINT: You can also paste gist.github.com links for larger files -->
```py
import apt
import apt_pkg
cache = apt.Cache()
pkg = cache['elasticsearch']
installed = pkg.installed.version
candidate = pkg.versions[0].version
vc = apt_pkg.version_compare(installed, candidate)
if vc > 0:
print(f'{installed} version > {candidate} version')
elif vc == 0:
print(f'{installed} version = {candidate} version')
elif vc < 0 and pkg.is_upgradable:
print(f'{installed} version < {candidate} version')
```
Documentation: https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html?highlight=version_compare#apt_pkg.version_compare
|
https://github.com/ansible/ansible/issues/69034
|
https://github.com/ansible/ansible/pull/75002
|
a5bea8b2f51ba9d9c001f713ee7e658cf1a96385
|
4a62c4e3e44b01a904aa86e9b87206a24bd41fbc
| 2020-04-20T07:29:38Z |
python
| 2022-01-11T14:41:12Z |
test/integration/targets/setup_deb_repo/tasks/main.yml
|
- block:
- name: Install needed packages
apt:
name: "{{ item }}"
with_items:
- dpkg-dev
- equivs
- libfile-fcntllock-perl # to silence warning by equivs-build
- set_fact:
repodir: /tmp/repo/
- name: Create repo dir
file:
path: "{{ repodir }}"
state: directory
mode: 0755
- name: Copy package specs to remote
copy:
src: "{{ item }}"
dest: "{{ remote_tmp_dir }}/{{ item | basename }}"
with_fileglob:
- "files/package_specs/*"
- name: Create deb files
shell: "equivs-build {{ remote_tmp_dir }}/{{ item | basename }}"
args:
chdir: "{{ repodir }}"
with_fileglob:
- "files/package_specs/*"
- name: Create repo
shell: dpkg-scanpackages --multiversion . /dev/null | gzip -9c > Packages.gz
args:
chdir: "{{ repodir }}"
# Can't use apt_repository as it doesn't expose a trusted=yes option
- name: Install the repo
copy:
content: deb [trusted=yes] file:{{ repodir }} ./
dest: /etc/apt/sources.list.d/file_tmp_repo.list
# Need to uncomment the deb-src for the universe component for build-dep state
- name: Ensure deb-src for the universe component
lineinfile:
path: /etc/apt/sources.list
backrefs: True
regexp: ^#\s*deb-src http://archive\.ubuntu\.com/ubuntu/ (\w*){{ item }} universe$
line: deb-src http://archive.ubuntu.com/ubuntu \1{{ item }} universe
state: present
with_items:
- ''
- -updates
when: ansible_distribution in ['Ubuntu', 'Debian']
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,672 |
end_play not behave the same after upgrade to Ansible core 2.12
|
### Summary
In an Ansible core 2.11.6 environment, `end_play` exits the current play and continues with the following plays, while in an Ansible core 2.12.1 environment it exits the entire playbook. Is this the expected result of the change? Thanks.
### Issue Type
Bug Report
### Component Name
meta
### Ansible Version
```console
$ ansible --version
ansible [core 2.12.1]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.8/site-packages/ansible
ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible
python version = 3.8.12 (default, Dec 12 2021, 11:39:22) [GCC 7.3.0]
jinja version = 3.0.3
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
No changes.
```
### OS / Environment
Ubuntu 18.04
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml
- name: play1
hosts: localhost
gather_facts: no
tasks:
- debug:
msg: "play 1 task 1"
- meta: end_play
- debug:
msg: "play 1 task 2"
- name: play2
hosts: localhost
gather_facts: no
tasks:
- debug:
msg: "play 2 task 1"
- debug:
msg: "play 2 task 2"
```
### Expected Results
The task in the first play after `end_play` is not executed, and the tasks in play 2 are executed.
### Actual Results
```console
PLAYBOOK: test.yml ***********************************************************************************************************************
2 plays in test.yml
PLAY [play1] *****************************************************************************************************************************
META: ran handlers
TASK [debug] *****************************************************************************************************************************
task path: /root/workspace/test_yml/test_end_play/test.yml:5
ok: [localhost] => {
"msg": "play 1 task 1"
}
TASK [meta] ******************************************************************************************************************************
task path: /root/workspace/test_yml/test_end_play/test.yml:7
META: ending play
PLAY RECAP *******************************************************************************************************************************
localhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
By contrast, in an Ansible core 2.11.6 environment:
```
PLAYBOOK: test.yml ***********************************************************************************************************************
2 plays in test.yml
PLAY [play1] *****************************************************************************************************************************
META: ran handlers
TASK [debug] *****************************************************************************************************************************
task path: /root/test_yml/test_end_play/test.yml:5
ok: [localhost] => {
"msg": "play 1 task 1"
}
TASK [meta] ******************************************************************************************************************************
task path: /root/test_yml/test_end_play/test.yml:7
META: ending play
PLAY [play2] *****************************************************************************************************************************
META: ran handlers
TASK [debug] *****************************************************************************************************************************
task path: /root/test_yml/test_end_play/test.yml:15
ok: [localhost] => {
"msg": "play 2 task 1"
}
TASK [debug] *****************************************************************************************************************************
task path: /root/test_yml/test_end_play/test.yml:18
ok: [localhost] => {
"msg": "play 2 task 2"
}
META: ran handlers
META: ran handlers
PLAY RECAP *******************************************************************************************************************************
localhost : ok=3 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76672
|
https://github.com/ansible/ansible/pull/76674
|
4a62c4e3e44b01a904aa86e9b87206a24bd41fbc
|
f78deccec2d4b5447f32d4fc67eaa549f479ccaa
| 2022-01-07T03:48:25Z |
python
| 2022-01-11T15:27:42Z |
changelogs/fragments/76672-fix-end_play-multiple_plays.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,672 |
end_play not behave the same after upgrade to Ansible core 2.12
|
### Summary
In an Ansible core 2.11.6 environment, `end_play` exits the current play and continues with the following plays, while in an Ansible core 2.12.1 environment it exits the entire playbook. Is this the expected result of the change? Thanks.
### Issue Type
Bug Report
### Component Name
meta
### Ansible Version
```console
$ ansible --version
ansible [core 2.12.1]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.8/site-packages/ansible
ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible
python version = 3.8.12 (default, Dec 12 2021, 11:39:22) [GCC 7.3.0]
jinja version = 3.0.3
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
No changes.
```
### OS / Environment
Ubuntu 18.04
### Steps to Reproduce
```yaml
- name: play1
hosts: localhost
gather_facts: no
tasks:
- debug:
msg: "play 1 task 1"
- meta: end_play
- debug:
msg: "play 1 task 2"
- name: play2
hosts: localhost
gather_facts: no
tasks:
- debug:
msg: "play 2 task 1"
- debug:
msg: "play 2 task 2"
```
### Expected Results
The task in the first play after `end_play` is not executed, and the tasks in play 2 are executed.
### Actual Results
```console
PLAYBOOK: test.yml ***********************************************************************************************************************
2 plays in test.yml
PLAY [play1] *****************************************************************************************************************************
META: ran handlers
TASK [debug] *****************************************************************************************************************************
task path: /root/workspace/test_yml/test_end_play/test.yml:5
ok: [localhost] => {
"msg": "play 1 task 1"
}
TASK [meta] ******************************************************************************************************************************
task path: /root/workspace/test_yml/test_end_play/test.yml:7
META: ending play
PLAY RECAP *******************************************************************************************************************************
localhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
While in an Ansible core 2.11.6 environment:
```
PLAYBOOK: test.yml ***********************************************************************************************************************
2 plays in test.yml
PLAY [play1] *****************************************************************************************************************************
META: ran handlers
TASK [debug] *****************************************************************************************************************************
task path: /root/test_yml/test_end_play/test.yml:5
ok: [localhost] => {
"msg": "play 1 task 1"
}
TASK [meta] ******************************************************************************************************************************
task path: /root/test_yml/test_end_play/test.yml:7
META: ending play
PLAY [play2] *****************************************************************************************************************************
META: ran handlers
TASK [debug] *****************************************************************************************************************************
task path: /root/test_yml/test_end_play/test.yml:15
ok: [localhost] => {
"msg": "play 2 task 1"
}
TASK [debug] *****************************************************************************************************************************
task path: /root/test_yml/test_end_play/test.yml:18
ok: [localhost] => {
"msg": "play 2 task 2"
}
META: ran handlers
META: ran handlers
PLAY RECAP *******************************************************************************************************************************
localhost : ok=3 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
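A minimal control-flow sketch (a hypothetical simplification for illustration, not the actual ansible source) of why the two versions differ: in the 2.12 executor, the exception raised for `end_play` sets a flag that is also checked at the bottom of the outer loop over plays, so the remaining plays are skipped.
```python
# Hypothetical simplification of the executor's nested loops (not real ansible code).
class EndPlay(Exception):
    pass

def run_play(play):
    for task in play["tasks"]:
        if task == "end_play":
            raise EndPlay()
        print(play["name"], task)

def run_playbook(plays):
    for play in plays:
        break_play = False
        for batch in ["all"]:        # stands in for the serialized host batches
            try:
                run_play(play)
            except EndPlay:
                break_play = True    # 2.12.1 sets this flag here...
                break
        if break_play:
            break                    # ...which also ends the loop over plays

run_playbook([
    {"name": "play1", "tasks": ["task 1", "end_play", "task 2"]},
    {"name": "play2", "tasks": ["task 1", "task 2"]},
])
# prints only "play1 task 1" -- play2 is skipped, matching the 2.12.1 output above;
# breaking only the batch loop would reproduce the 2.11.6 behavior instead.
```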
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76672
|
https://github.com/ansible/ansible/pull/76674
|
4a62c4e3e44b01a904aa86e9b87206a24bd41fbc
|
f78deccec2d4b5447f32d4fc67eaa549f479ccaa
| 2022-01-07T03:48:25Z |
python
| 2022-01-11T15:27:42Z |
lib/ansible/executor/playbook_executor.py
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible import context
from ansible.executor.task_queue_manager import TaskQueueManager, AnsibleEndPlay
from ansible.module_utils._text import to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.loader import become_loader, connection_loader, shell_loader
from ansible.playbook import Playbook
from ansible.template import Templar
from ansible.utils.helpers import pct_to_int
from ansible.utils.collection_loader import AnsibleCollectionConfig
from ansible.utils.collection_loader._collection_finder import _get_collection_name_from_path, _get_collection_playbook_path
from ansible.utils.path import makedirs_safe
from ansible.utils.ssh_functions import set_default_transport
from ansible.utils.display import Display
display = Display()
class PlaybookExecutor:
'''
This is the primary class for executing playbooks, and thus the
basis for bin/ansible-playbook operation.
'''
def __init__(self, playbooks, inventory, variable_manager, loader, passwords):
self._playbooks = playbooks
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self.passwords = passwords
self._unreachable_hosts = dict()
if context.CLIARGS.get('listhosts') or context.CLIARGS.get('listtasks') or \
context.CLIARGS.get('listtags') or context.CLIARGS.get('syntax'):
self._tqm = None
else:
self._tqm = TaskQueueManager(
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
passwords=self.passwords,
forks=context.CLIARGS.get('forks'),
)
# Note: We run this here to cache whether the default ansible ssh
# executable supports control persist. Sometime in the future we may
# need to enhance this to check that ansible_ssh_executable specified
# in inventory is also cached. We can't do this caching at the point
# where it is used (in task_executor) because that is post-fork and
# therefore would be discarded after every task.
set_default_transport()
def run(self):
'''
Run the given playbook, based on the settings in the play which
may limit the runs to serialized groups, etc.
'''
result = 0
entrylist = []
entry = {}
try:
# preload become/connection/shell to set config defs cached
list(connection_loader.all(class_only=True))
list(shell_loader.all(class_only=True))
list(become_loader.all(class_only=True))
for playbook in self._playbooks:
# deal with FQCN
resource = _get_collection_playbook_path(playbook)
if resource is not None:
playbook_path = resource[1]
playbook_collection = resource[2]
else:
playbook_path = playbook
# not fqcn, but might still be collection playbook
playbook_collection = _get_collection_name_from_path(playbook)
if playbook_collection:
display.warning("running playbook inside collection {0}".format(playbook_collection))
AnsibleCollectionConfig.default_collection = playbook_collection
else:
AnsibleCollectionConfig.default_collection = None
pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
# FIXME: move out of inventory self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))
if self._tqm is None: # we are doing a listing
entry = {'playbook': playbook_path}
entry['plays'] = []
else:
# make sure the tqm has callbacks loaded
self._tqm.load_callbacks()
self._tqm.send_callback('v2_playbook_on_start', pb)
i = 1
plays = pb.get_plays()
display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path)))
for play in plays:
if play._included_path is not None:
self._loader.set_basedir(play._included_path)
else:
self._loader.set_basedir(pb._basedir)
# clear any filters which may have been applied to the inventory
self._inventory.remove_restriction()
# Allow variables to be used in vars_prompt fields.
all_vars = self._variable_manager.get_vars(play=play)
templar = Templar(loader=self._loader, variables=all_vars)
setattr(play, 'vars_prompt', templar.template(play.vars_prompt))
# FIXME: this should be a play 'sub object' like loop_control
if play.vars_prompt:
for var in play.vars_prompt:
vname = var['name']
prompt = var.get("prompt", vname)
default = var.get("default", None)
private = boolean(var.get("private", True))
confirm = boolean(var.get("confirm", False))
encrypt = var.get("encrypt", None)
salt_size = var.get("salt_size", None)
salt = var.get("salt", None)
unsafe = var.get("unsafe", None)
if vname not in self._variable_manager.extra_vars:
if self._tqm:
self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt,
default, unsafe)
play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default, unsafe)
else: # we are either in --list-<option> or syntax check
play.vars[vname] = default
# Post validate so any play level variables are templated
all_vars = self._variable_manager.get_vars(play=play)
templar = Templar(loader=self._loader, variables=all_vars)
play.post_validate(templar)
if context.CLIARGS['syntax']:
continue
if self._tqm is None:
# we are just doing a listing
entry['plays'].append(play)
else:
self._tqm._unreachable_hosts.update(self._unreachable_hosts)
previously_failed = len(self._tqm._failed_hosts)
previously_unreachable = len(self._tqm._unreachable_hosts)
break_play = False
# we are actually running plays
batches = self._get_serialized_batches(play)
if len(batches) == 0:
self._tqm.send_callback('v2_playbook_on_play_start', play)
self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
for batch in batches:
# restrict the inventory to the hosts in the serialized batch
self._inventory.restrict_to_hosts(batch)
# and run it...
try:
result = self._tqm.run(play=play)
except AnsibleEndPlay as e:
result = e.result
break_play = True
break
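# NOTE (editor annotation, not in the original source): setting break_play
# here also triggers the `if break_play: break` at the bottom of the
# `for play in plays` loop below, so an AnsibleEndPlay raised by
# `meta: end_play` ends the remaining plays too -- which appears to be the
# behavior reported in the issue above.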
# break the play if the result equals the special return code
if result & self._tqm.RUN_FAILED_BREAK_PLAY != 0:
result = self._tqm.RUN_FAILED_HOSTS
break_play = True
# check the number of failures here, to see if they're above the maximum
# failure percentage allowed, or if any errors are fatal. If either of those
# conditions are met, we break out, otherwise we only break out if the entire
# batch failed
failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \
(previously_failed + previously_unreachable)
if len(batch) == failed_hosts_count:
break_play = True
break
# update the previous counts so they don't accumulate incorrectly
# over multiple serial batches
previously_failed += len(self._tqm._failed_hosts) - previously_failed
previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable
# save the unreachable hosts from this batch
self._unreachable_hosts.update(self._tqm._unreachable_hosts)
if break_play:
break
i = i + 1 # per play
if entry:
entrylist.append(entry) # per playbook
# send the stats callback for this playbook
if self._tqm is not None:
if C.RETRY_FILES_ENABLED:
retries = set(self._tqm._failed_hosts.keys())
retries.update(self._tqm._unreachable_hosts.keys())
retries = sorted(retries)
if len(retries) > 0:
if C.RETRY_FILES_SAVE_PATH:
basedir = C.RETRY_FILES_SAVE_PATH
elif playbook_path:
basedir = os.path.dirname(os.path.abspath(playbook_path))
else:
basedir = '~/'
(retry_name, _) = os.path.splitext(os.path.basename(playbook_path))
filename = os.path.join(basedir, "%s.retry" % retry_name)
if self._generate_retry_inventory(filename, retries):
display.display("\tto retry, use: --limit @%s\n" % filename)
self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
# if the last result wasn't zero, break out of the playbook file name loop
if result != 0:
break
if entrylist:
return entrylist
finally:
if self._tqm is not None:
self._tqm.cleanup()
if self._loader:
self._loader.cleanup_all_tmp_files()
if context.CLIARGS['syntax']:
display.display("No issues encountered")
return result
if context.CLIARGS['start_at_task'] and not self._tqm._start_at_done:
display.error(
"No matching task \"%s\" found."
" Note: --start-at-task can only follow static includes."
% context.CLIARGS['start_at_task']
)
return result
def _get_serialized_batches(self, play):
'''
Returns a list of hosts, subdivided into batches based on
the serial size specified in the play.
'''
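# Worked example (editor annotation, not in the original source): with 10
# hosts and serial: [1, '30%'], pct_to_int yields batch sizes 1, 3, 3, 3;
# the last list item is reused until every host is consumed.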
# make sure we have a unique list of hosts
all_hosts = self._inventory.get_hosts(play.hosts, order=play.order)
all_hosts_len = len(all_hosts)
# the serial value can be listed as a scalar or a list of
# scalars, so we make sure it's a list here
serial_batch_list = play.serial
if len(serial_batch_list) == 0:
serial_batch_list = [-1]
cur_item = 0
serialized_batches = []
while len(all_hosts) > 0:
# get the serial value from current item in the list
serial = pct_to_int(serial_batch_list[cur_item], all_hosts_len)
# if the serial count was not specified or is invalid, default to
# a list of all hosts, otherwise grab a chunk of the hosts equal
# to the current serial item size
if serial <= 0:
serialized_batches.append(all_hosts)
break
else:
play_hosts = []
for x in range(serial):
if len(all_hosts) > 0:
play_hosts.append(all_hosts.pop(0))
serialized_batches.append(play_hosts)
# increment the current batch list item number, and if we've hit
# the end keep using the last element until we've consumed all of
# the hosts in the inventory
cur_item += 1
if cur_item > len(serial_batch_list) - 1:
cur_item = len(serial_batch_list) - 1
return serialized_batches
def _generate_retry_inventory(self, retry_path, replay_hosts):
'''
Called when a playbook run fails. It generates an inventory which allows
re-running on ONLY the failed hosts. This may duplicate some variable
information in group_vars/host_vars but that is ok, and expected.
'''
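# NOTE (editor annotation, not in the original source): the retry file is a
# plain newline-separated host list, consumed later via
# `ansible-playbook <playbook> --limit @<playbook>.retry` as the message
# printed above suggests.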
try:
makedirs_safe(os.path.dirname(retry_path))
with open(retry_path, 'w') as fd:
for x in replay_hosts:
fd.write("%s\n" % x)
except Exception as e:
display.warning("Could not create retry file '%s'.\n\t%s" % (retry_path, to_text(e)))
return False
return True
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,672 |
end_play does not behave the same after upgrading to Ansible core 2.12
|
### Summary
In an Ansible core 2.11.6 environment, `end_play` exits the current play and continues with the following plays, while in an Ansible core 2.12.1 environment, `end_play` exits the entire playbook. Is this the expected result of the change? Thanks.
### Issue Type
Bug Report
### Component Name
meta
### Ansible Version
```console
$ ansible --version
ansible [core 2.12.1]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.8/site-packages/ansible
ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible
python version = 3.8.12 (default, Dec 12 2021, 11:39:22) [GCC 7.3.0]
jinja version = 3.0.3
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
No changes.
```
### OS / Environment
Ubuntu 18.04
### Steps to Reproduce
```yaml
- name: play1
hosts: localhost
gather_facts: no
tasks:
- debug:
msg: "play 1 task 1"
- meta: end_play
- debug:
msg: "play 1 task 2"
- name: play2
hosts: localhost
gather_facts: no
tasks:
- debug:
msg: "play 2 task 1"
- debug:
msg: "play 2 task 2"
```
### Expected Results
The task in the first play after `end_play` is not executed, and the tasks in play 2 are executed.
### Actual Results
```console
PLAYBOOK: test.yml ***********************************************************************************************************************
2 plays in test.yml
PLAY [play1] *****************************************************************************************************************************
META: ran handlers
TASK [debug] *****************************************************************************************************************************
task path: /root/workspace/test_yml/test_end_play/test.yml:5
ok: [localhost] => {
"msg": "play 1 task 1"
}
TASK [meta] ******************************************************************************************************************************
task path: /root/workspace/test_yml/test_end_play/test.yml:7
META: ending play
PLAY RECAP *******************************************************************************************************************************
localhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
While in an Ansible core 2.11.6 environment:
```
PLAYBOOK: test.yml ***********************************************************************************************************************
2 plays in test.yml
PLAY [play1] *****************************************************************************************************************************
META: ran handlers
TASK [debug] *****************************************************************************************************************************
task path: /root/test_yml/test_end_play/test.yml:5
ok: [localhost] => {
"msg": "play 1 task 1"
}
TASK [meta] ******************************************************************************************************************************
task path: /root/test_yml/test_end_play/test.yml:7
META: ending play
PLAY [play2] *****************************************************************************************************************************
META: ran handlers
TASK [debug] *****************************************************************************************************************************
task path: /root/test_yml/test_end_play/test.yml:15
ok: [localhost] => {
"msg": "play 2 task 1"
}
TASK [debug] *****************************************************************************************************************************
task path: /root/test_yml/test_end_play/test.yml:18
ok: [localhost] => {
"msg": "play 2 task 2"
}
META: ran handlers
META: ran handlers
PLAY RECAP *******************************************************************************************************************************
localhost : ok=3 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76672
|
https://github.com/ansible/ansible/pull/76674
|
4a62c4e3e44b01a904aa86e9b87206a24bd41fbc
|
f78deccec2d4b5447f32d4fc67eaa549f479ccaa
| 2022-01-07T03:48:25Z |
python
| 2022-01-11T15:27:42Z |
test/integration/targets/meta_tasks/runme.sh
|
#!/usr/bin/env bash
set -eux
# test end_host meta task, with when conditional
for test_strategy in linear free; do
out="$(ansible-playbook test_end_host.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
grep -q "META: end_host conditional evaluated to false, continuing execution for testhost" <<< "$out"
grep -q "META: ending play for testhost2" <<< "$out"
grep -q '"skip_reason": "end_host conditional evaluated to False, continuing execution for testhost"' <<< "$out"
grep -q "play not ended for testhost" <<< "$out"
grep -qv "play not ended for testhost2" <<< "$out"
out="$(ansible-playbook test_end_host_fqcn.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
grep -q "META: end_host conditional evaluated to false, continuing execution for testhost" <<< "$out"
grep -q "META: ending play for testhost2" <<< "$out"
grep -q '"skip_reason": "end_host conditional evaluated to False, continuing execution for testhost"' <<< "$out"
grep -q "play not ended for testhost" <<< "$out"
grep -qv "play not ended for testhost2" <<< "$out"
done
# test end_host meta task, on all hosts
for test_strategy in linear free; do
out="$(ansible-playbook test_end_host_all.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
grep -q "META: ending play for testhost" <<< "$out"
grep -q "META: ending play for testhost2" <<< "$out"
grep -qv "play not ended for testhost" <<< "$out"
grep -qv "play not ended for testhost2" <<< "$out"
out="$(ansible-playbook test_end_host_all_fqcn.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
grep -q "META: ending play for testhost" <<< "$out"
grep -q "META: ending play for testhost2" <<< "$out"
grep -qv "play not ended for testhost" <<< "$out"
grep -qv "play not ended for testhost2" <<< "$out"
done
# test end_play meta task
for test_strategy in linear free; do
out="$(ansible-playbook test_end_play.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
grep -q "META: ending play" <<< "$out"
grep -qv 'Failed to end using end_play' <<< "$out"
out="$(ansible-playbook test_end_play_fqcn.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
grep -q "META: ending play" <<< "$out"
grep -qv 'Failed to end using end_play' <<< "$out"
out="$(ansible-playbook test_end_play_serial_one.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
[ "$(grep -c "Testing end_play on host" <<< "$out" )" -eq 1 ]
grep -q "META: ending play" <<< "$out"
grep -qv 'Failed to end using end_play' <<< "$out"
done
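# NOTE (editor annotation, not part of the original script): a regression test
# for issue 76672 would presumably assert here that a play following
# `meta: end_play` still runs; the new test_end_play_multiple_plays.yml target
# (empty in this snapshot) looks like the intended home for that playbook.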
# test end_batch meta task
for test_strategy in linear free; do
out="$(ansible-playbook test_end_batch.yml -i inventory.yml -e test_strategy=$test_strategy -vv "$@")"
[ "$(grep -c "Using end_batch" <<< "$out" )" -eq 2 ]
[ "$(grep -c "META: ending batch" <<< "$out" )" -eq 2 ]
grep -qv 'Failed to end_batch' <<< "$out"
done
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,672 |
end_play does not behave the same after upgrading to Ansible core 2.12
|
### Summary
In an Ansible core 2.11.6 environment, `end_play` exits the current play and continues with the following plays, while in an Ansible core 2.12.1 environment, `end_play` exits the entire playbook. Is this the expected result of the change? Thanks.
### Issue Type
Bug Report
### Component Name
meta
### Ansible Version
```console
$ ansible --version
ansible [core 2.12.1]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.8/site-packages/ansible
ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible
python version = 3.8.12 (default, Dec 12 2021, 11:39:22) [GCC 7.3.0]
jinja version = 3.0.3
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
No changes.
```
### OS / Environment
Ubuntu 18.04
### Steps to Reproduce
```yaml
- name: play1
hosts: localhost
gather_facts: no
tasks:
- debug:
msg: "play 1 task 1"
- meta: end_play
- debug:
msg: "play 1 task 2"
- name: play2
hosts: localhost
gather_facts: no
tasks:
- debug:
msg: "play 2 task 1"
- debug:
msg: "play 2 task 2"
```
### Expected Results
The task in the first play after `end_play` is not executed, and the tasks in play 2 are executed.
### Actual Results
```console
PLAYBOOK: test.yml ***********************************************************************************************************************
2 plays in test.yml
PLAY [play1] *****************************************************************************************************************************
META: ran handlers
TASK [debug] *****************************************************************************************************************************
task path: /root/workspace/test_yml/test_end_play/test.yml:5
ok: [localhost] => {
"msg": "play 1 task 1"
}
TASK [meta] ******************************************************************************************************************************
task path: /root/workspace/test_yml/test_end_play/test.yml:7
META: ending play
PLAY RECAP *******************************************************************************************************************************
localhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
While in an Ansible core 2.11.6 environment:
```
PLAYBOOK: test.yml ***********************************************************************************************************************
2 plays in test.yml
PLAY [play1] *****************************************************************************************************************************
META: ran handlers
TASK [debug] *****************************************************************************************************************************
task path: /root/test_yml/test_end_play/test.yml:5
ok: [localhost] => {
"msg": "play 1 task 1"
}
TASK [meta] ******************************************************************************************************************************
task path: /root/test_yml/test_end_play/test.yml:7
META: ending play
PLAY [play2] *****************************************************************************************************************************
META: ran handlers
TASK [debug] *****************************************************************************************************************************
task path: /root/test_yml/test_end_play/test.yml:15
ok: [localhost] => {
"msg": "play 2 task 1"
}
TASK [debug] *****************************************************************************************************************************
task path: /root/test_yml/test_end_play/test.yml:18
ok: [localhost] => {
"msg": "play 2 task 2"
}
META: ran handlers
META: ran handlers
PLAY RECAP *******************************************************************************************************************************
localhost : ok=3 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76672
|
https://github.com/ansible/ansible/pull/76674
|
4a62c4e3e44b01a904aa86e9b87206a24bd41fbc
|
f78deccec2d4b5447f32d4fc67eaa549f479ccaa
| 2022-01-07T03:48:25Z |
python
| 2022-01-11T15:27:42Z |
test/integration/targets/meta_tasks/test_end_play_multiple_plays.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 69,721 |
Add AIX default CA certs path to module_utils/urls.py
|
##### SUMMARY
Add the standard AIX CA certificate paths to the CA certs search folders
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
- Playbook module: uri
- get_ca_certs in class SSLValidationHandler
- At line: https://github.com/ansible/ansible/blob/stable-2.9/lib/ansible/module_utils/urls.py#L833
##### ADDITIONAL INFORMATION
Problems:
- The **uri** Ansible playbook module fails to GET HTTPS URLs on AIX
- The standard AIX CA certs locations are missing from the search list
Solution:
We will have better support for the uri Ansible module on AIX managed hosts
by adding:
```python
elif system == u'AIX':
paths_checked.append('/var/ssl/certs')
paths_checked.append('/opt/freeware/etc/ssl/certs')
```
Those cert paths are provided by the **ca-certificates** package, downloadable from the official IBM [AIX Toolbox for Linux® Applications site](https://www.ibm.com/support/pages/aix-toolbox-linux-applications-downloads-alpha#C).
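For context, a simplified sketch of where the new branch would slot into the platform dispatch of `get_ca_certs` (paraphrased from the 2.9 code path linked above; the surrounding branches are abridged illustrations, not the verbatim upstream code):
```python
import platform

def build_ca_paths():
    # Abridged sketch of the platform dispatch in SSLValidationHandler.get_ca_certs
    paths_checked = []
    system = platform.system()
    if system == u'Linux':
        paths_checked.append('/etc/ssl/certs')       # existing branch (abridged)
    elif system == u'AIX':                           # proposed addition
        paths_checked.append('/var/ssl/certs')
        paths_checked.append('/opt/freeware/etc/ssl/certs')
    return paths_checked
```
The playbook task below exercises the change: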
```yaml
- name: GET https page
uri:
url: https://www.google.com
method: GET
return_content: yes
validate_certs: yes
register: get_replay
```
Before the change:
```
TASK [users-access : GET https page] *******************************************************************
fatal: [myserver]: FAILED! => {
"changed": false,
"content": "",
"elapsed": 0,
"invocation": {
"module_args": {
"attributes": null,
"backup": null,
"body": null,
"body_format": "raw",
"client_cert": null,
"client_key": null,
"content": null,
"creates": null,
"delimiter": null,
"dest": null,
"directory_mode": null,
"follow": false,
"follow_redirects": "safe",
"force": false,
"force_basic_auth": false,
"group": null,
"headers": {},
"http_agent": "ansible-httpget",
"method": "GET",
"mode": null,
"owner": null,
"regexp": null,
"remote_src": null,
"removes": null,
"return_content": true,
"selevel": null,
"serole": null,
"setype": null,
"seuser": null,
"src": null,
"status_code": [
200
],
"timeout": 30,
"unix_socket": null,
"unsafe_writes": null,
"url": "https://www.google.com",
"url_password": null,
"url_username": null,
"use_proxy": true,
"validate_certs": true
}
},
"msg": "Status code was -1 and not [200]: Request failed: <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:727)>",
"redirected": false,
"status": -1,
"url": "https://www.google.com"
}
```
After the change:
```
TASK [users-access : GET https page] *******************************************************************
ok: [myserver] => {
"accept_ranges": "none",
"alt_svc": "h3-27=\":443\"; ma=2592000,h3-25=\":443\"; ma=2592000,h3-T050=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q049=\":443\"; ma=2592000,h3-Q048=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\"",
"cache_control": "private, max-age=0",
"changed": false,
"connection": "close",
"content": ".....",
"content_type": "text/html; charset=ISO-8859-1",
...
...
"invocation": {
"module_args": {
"attributes": null,
"backup": null,
"body": null,
"body_format": "raw",
"client_cert": null,
"client_key": null,
"content": null,
"creates": null,
"delimiter": null,
"dest": null,
"directory_mode": null,
"follow": false,
"follow_redirects": "safe",
"force": false,
"force_basic_auth": false,
"group": null,
"headers": {},
"http_agent": "ansible-httpget",
"method": "GET",
"mode": null,
"owner": null,
"regexp": null,
"remote_src": null,
"removes": null,
"return_content": true,
"selevel": null,
"serole": null,
"setype": null,
"seuser": null,
"src": null,
"status_code": [
200
],
"timeout": 30,
"unix_socket": null,
"unsafe_writes": null,
"url": "https://www.google.com",
"url_password": null,
"url_username": null,
"use_proxy": true,
"validate_certs": true
}
},
"msg": "OK (unknown bytes)",
"p3p": "CP=\"This is not a P3P policy! See g.co/p3phelp for more info.\"",
"redirected": false,
"server": "gws",
"set_cookie": "...",
"status": 200,
"transfer_encoding": "chunked",
"url": "https://www.google.com",
"vary": "Accept-Encoding",
"x_frame_options": "SAMEORIGIN",
"x_xss_protection": "0"
```
|
https://github.com/ansible/ansible/issues/69721
|
https://github.com/ansible/ansible/pull/69776
|
c1df36e3ae999a32131442d124fd00dd0d649e35
|
71f399677801dabfe3f52910a66cd1b920b3bdba
| 2020-05-27T08:35:14Z |
python
| 2022-01-12T20:19:40Z |
changelogs/fragments/69776-add-aix-root-ca-certs-paths.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 69,721 |
Add AIX default CA certs path to module_utils/urls.py
|
##### SUMMARY
Add the standard AIX CA certificate paths to the CA certs search folders
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
- Playbook module: uri
- get_ca_certs in class SSLValidationHandler
- At line: https://github.com/ansible/ansible/blob/stable-2.9/lib/ansible/module_utils/urls.py#L833
##### ADDITIONAL INFORMATION
Problems:
- The **uri** Ansible playbook module fails to GET HTTPS URLs on AIX
- The standard AIX CA certs locations are missing from the search list
Solution:
We will have better support for the uri Ansible module on AIX managed hosts
by adding:
```python
elif system == u'AIX':
paths_checked.append('/var/ssl/certs')
paths_checked.append('/opt/freeware/etc/ssl/certs')
```
Those cert paths are provided by the **ca-certificates** package, downloadable from the official IBM [AIX Toolbox for Linux® Applications site](https://www.ibm.com/support/pages/aix-toolbox-linux-applications-downloads-alpha#C).
```yaml
- name: GET https page
uri:
url: https://www.google.com
method: GET
return_content: yes
validate_certs: yes
register: get_replay
```
Before the change:
```
TASK [users-access : GET https page] *******************************************************************
fatal: [myserver]: FAILED! => {
"changed": false,
"content": "",
"elapsed": 0,
"invocation": {
"module_args": {
"attributes": null,
"backup": null,
"body": null,
"body_format": "raw",
"client_cert": null,
"client_key": null,
"content": null,
"creates": null,
"delimiter": null,
"dest": null,
"directory_mode": null,
"follow": false,
"follow_redirects": "safe",
"force": false,
"force_basic_auth": false,
"group": null,
"headers": {},
"http_agent": "ansible-httpget",
"method": "GET",
"mode": null,
"owner": null,
"regexp": null,
"remote_src": null,
"removes": null,
"return_content": true,
"selevel": null,
"serole": null,
"setype": null,
"seuser": null,
"src": null,
"status_code": [
200
],
"timeout": 30,
"unix_socket": null,
"unsafe_writes": null,
"url": "https://www.google.com",
"url_password": null,
"url_username": null,
"use_proxy": true,
"validate_certs": true
}
},
"msg": "Status code was -1 and not [200]: Request failed: <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:727)>",
"redirected": false,
"status": -1,
"url": "https://www.google.com"
}
```
After the change:
```
TASK [users-access : GET https page] *******************************************************************
ok: [myserver] => {
"accept_ranges": "none",
"alt_svc": "h3-27=\":443\"; ma=2592000,h3-25=\":443\"; ma=2592000,h3-T050=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q049=\":443\"; ma=2592000,h3-Q048=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\"",
"cache_control": "private, max-age=0",
"changed": false,
"connection": "close",
"content": ".....",
"content_type": "text/html; charset=ISO-8859-1",
...
...
"invocation": {
"module_args": {
"attributes": null,
"backup": null,
"body": null,
"body_format": "raw",
"client_cert": null,
"client_key": null,
"content": null,
"creates": null,
"delimiter": null,
"dest": null,
"directory_mode": null,
"follow": false,
"follow_redirects": "safe",
"force": false,
"force_basic_auth": false,
"group": null,
"headers": {},
"http_agent": "ansible-httpget",
"method": "GET",
"mode": null,
"owner": null,
"regexp": null,
"remote_src": null,
"removes": null,
"return_content": true,
"selevel": null,
"serole": null,
"setype": null,
"seuser": null,
"src": null,
"status_code": [
200
],
"timeout": 30,
"unix_socket": null,
"unsafe_writes": null,
"url": "https://www.google.com",
"url_password": null,
"url_username": null,
"use_proxy": true,
"validate_certs": true
}
},
"msg": "OK (unknown bytes)",
"p3p": "CP=\"This is not a P3P policy! See g.co/p3phelp for more info.\"",
"redirected": false,
"server": "gws",
"set_cookie": "...",
"status": 200,
"transfer_encoding": "chunked",
"url": "https://www.google.com",
"vary": "Accept-Encoding",
"x_frame_options": "SAMEORIGIN",
"x_xss_protection": "0"
```
|
https://github.com/ansible/ansible/issues/69721
|
https://github.com/ansible/ansible/pull/69776
|
c1df36e3ae999a32131442d124fd00dd0d649e35
|
71f399677801dabfe3f52910a66cd1b920b3bdba
| 2020-05-27T08:35:14Z |
python
| 2022-01-12T20:19:40Z |
lib/ansible/module_utils/urls.py
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# Copyright (c), Toshio Kuratomi <[email protected]>, 2015
#
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
#
# The match_hostname function and supporting code is under the terms and
# conditions of the Python Software Foundation License. They were taken from
# the Python3 standard library and adapted for use in Python2. See comments in the
# source for which code precisely is under this License.
#
# PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0)
'''
The **urls** utils module offers a replacement for the urllib2 python library.
urllib2 is the python stdlib way to retrieve files from the Internet but it
lacks some security features (around verifying SSL certificates) that users
should care about in most situations. Using the functions in this module corrects
deficiencies in the urllib2 module wherever possible.
There are also third-party libraries (for instance, requests) which can be used
to replace urllib2 with a more secure library. However, all third party libraries
require that the library be installed on the managed machine. That is an extra step
for users making use of a module. If possible, avoid third party libraries by using
this code instead.
'''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import atexit
import base64
import email.mime.multipart
import email.mime.nonmultipart
import email.mime.application
import email.parser
import email.utils
import functools
import mimetypes
import netrc
import os
import platform
import re
import socket
import sys
import tempfile
import traceback
from contextlib import contextmanager
try:
import email.policy
except ImportError:
# Py2
import email.generator
try:
import httplib
except ImportError:
# Python 3
import http.client as httplib
import ansible.module_utils.six.moves.http_cookiejar as cookiejar
import ansible.module_utils.six.moves.urllib.request as urllib_request
import ansible.module_utils.six.moves.urllib.error as urllib_error
from ansible.module_utils.common.collections import Mapping
from ansible.module_utils.six import PY2, PY3, string_types
from ansible.module_utils.six.moves import cStringIO
from ansible.module_utils.basic import get_distribution, missing_required_lib
from ansible.module_utils._text import to_bytes, to_native, to_text
try:
# python3
import urllib.request as urllib_request
from urllib.request import AbstractHTTPHandler, BaseHandler
except ImportError:
# python2
import urllib2 as urllib_request
from urllib2 import AbstractHTTPHandler, BaseHandler
urllib_request.HTTPRedirectHandler.http_error_308 = urllib_request.HTTPRedirectHandler.http_error_307
try:
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse, unquote
HAS_URLPARSE = True
except Exception:
HAS_URLPARSE = False
try:
import ssl
HAS_SSL = True
except Exception:
HAS_SSL = False
try:
# SNI Handling needs python2.7.9's SSLContext
from ssl import create_default_context, SSLContext
HAS_SSLCONTEXT = True
except ImportError:
HAS_SSLCONTEXT = False
# SNI Handling for python < 2.7.9 with urllib3 support
try:
# urllib3>=1.15
HAS_URLLIB3_SSL_WRAP_SOCKET = False
try:
from urllib3.contrib.pyopenssl import PyOpenSSLContext
except ImportError:
from requests.packages.urllib3.contrib.pyopenssl import PyOpenSSLContext
HAS_URLLIB3_PYOPENSSLCONTEXT = True
except ImportError:
# urllib3<1.15,>=1.6
HAS_URLLIB3_PYOPENSSLCONTEXT = False
try:
try:
from urllib3.contrib.pyopenssl import ssl_wrap_socket
except ImportError:
from requests.packages.urllib3.contrib.pyopenssl import ssl_wrap_socket
HAS_URLLIB3_SSL_WRAP_SOCKET = True
except ImportError:
pass
# Select a protocol that includes all secure tls protocols
# Exclude insecure ssl protocols if possible
if HAS_SSL:
# If we can't find extra tls methods, ssl.PROTOCOL_TLSv1 is sufficient
PROTOCOL = ssl.PROTOCOL_TLSv1
if not HAS_SSLCONTEXT and HAS_SSL:
try:
import ctypes
import ctypes.util
except ImportError:
# python 2.4 (likely rhel5 which doesn't have tls1.1 support in its openssl)
pass
else:
libssl_name = ctypes.util.find_library('ssl')
libssl = ctypes.CDLL(libssl_name)
for method in ('TLSv1_1_method', 'TLSv1_2_method'):
try:
libssl[method]
# Found something - we'll let openssl autonegotiate and hope
# the server has disabled sslv2 and 3. best we can do.
PROTOCOL = ssl.PROTOCOL_SSLv23
break
except AttributeError:
pass
del libssl
# The following makes it easier for us to script updates of the bundled backports.ssl_match_hostname
# The bundled backports.ssl_match_hostname should really be moved into its own file for processing
_BUNDLED_METADATA = {"pypi_name": "backports.ssl_match_hostname", "version": "3.7.0.1"}
LOADED_VERIFY_LOCATIONS = set()
HAS_MATCH_HOSTNAME = True
try:
from ssl import match_hostname, CertificateError
except ImportError:
try:
from backports.ssl_match_hostname import match_hostname, CertificateError
except ImportError:
HAS_MATCH_HOSTNAME = False
HAS_CRYPTOGRAPHY = True
try:
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.exceptions import UnsupportedAlgorithm
except ImportError:
HAS_CRYPTOGRAPHY = False
# Old import for GSSAPI authentication, this is not used in urls.py but kept for backwards compatibility.
try:
import urllib_gssapi
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
GSSAPI_IMP_ERR = None
try:
import gssapi
class HTTPGSSAPIAuthHandler(BaseHandler):
""" Handles Negotiate/Kerberos support through the gssapi library. """
AUTH_HEADER_PATTERN = re.compile(r'(?:.*)\s*(Negotiate|Kerberos)\s*([^,]*),?', re.I)
handler_order = 480 # Handle before Digest authentication
def __init__(self, username=None, password=None):
self.username = username
self.password = password
self._context = None
def get_auth_value(self, headers):
auth_match = self.AUTH_HEADER_PATTERN.search(headers.get('www-authenticate', ''))
if auth_match:
return auth_match.group(1), base64.b64decode(auth_match.group(2))
def http_error_401(self, req, fp, code, msg, headers):
# If we've already attempted the auth and we've reached this again then there was a failure.
if self._context:
return
parsed = generic_urlparse(urlparse(req.get_full_url()))
auth_header = self.get_auth_value(headers)
if not auth_header:
return
auth_protocol, in_token = auth_header
username = None
if self.username:
username = gssapi.Name(self.username, name_type=gssapi.NameType.user)
if username and self.password:
if not hasattr(gssapi.raw, 'acquire_cred_with_password'):
raise NotImplementedError("Platform GSSAPI library does not support "
"gss_acquire_cred_with_password, cannot acquire GSSAPI credential with "
"explicit username and password.")
b_password = to_bytes(self.password, errors='surrogate_or_strict')
cred = gssapi.raw.acquire_cred_with_password(username, b_password, usage='initiate').creds
else:
cred = gssapi.Credentials(name=username, usage='initiate')
# Get the peer certificate for the channel binding token if possible (HTTPS). A bug on macOS causes the
# authentication to fail when the CBT is present. Just skip that platform.
cbt = None
cert = getpeercert(fp, True)
if cert and platform.system() != 'Darwin':
cert_hash = get_channel_binding_cert_hash(cert)
if cert_hash:
cbt = gssapi.raw.ChannelBindings(application_data=b"tls-server-end-point:" + cert_hash)
# TODO: We could add another option that is set to include the port in the SPN if desired in the future.
target = gssapi.Name("HTTP@%s" % parsed['hostname'], gssapi.NameType.hostbased_service)
self._context = gssapi.SecurityContext(usage="initiate", name=target, creds=cred, channel_bindings=cbt)
resp = None
while not self._context.complete:
out_token = self._context.step(in_token)
if not out_token:
break
auth_header = '%s %s' % (auth_protocol, to_native(base64.b64encode(out_token)))
req.add_unredirected_header('Authorization', auth_header)
resp = self.parent.open(req)
# The response could contain a token that the client uses to validate the server
auth_header = self.get_auth_value(resp.headers)
if not auth_header:
break
in_token = auth_header[1]
return resp
except ImportError:
GSSAPI_IMP_ERR = traceback.format_exc()
HTTPGSSAPIAuthHandler = None
if not HAS_MATCH_HOSTNAME:
# The following block of code is under the terms and conditions of the
# Python Software Foundation License
"""The match_hostname() function from Python 3.4, essential when using SSL."""
try:
# Divergence: Python-3.7+'s _ssl has this exception type but older Pythons do not
from _ssl import SSLCertVerificationError
CertificateError = SSLCertVerificationError
except ImportError:
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname):
"""Matching according to RFC 6125, section 6.4.3
- Hostnames are compared lower case.
- For IDNA, both dn and hostname must be encoded as IDN A-label (ACE).
- Partial wildcards like 'www*.example.org', multiple wildcards, sole
wildcard or wildcards in labels other than the left-most label are not
supported and a CertificateError is raised.
- A wildcard must match at least one character.
"""
if not dn:
return False
wildcards = dn.count('*')
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
if wildcards > 1:
# Divergence .format() to percent formatting for Python < 2.6
raise CertificateError(
"too many wildcards in certificate DNS name: %s" % repr(dn))
dn_leftmost, sep, dn_remainder = dn.partition('.')
if '*' in dn_remainder:
# Only match wildcard in leftmost segment.
# Divergence .format() to percent formatting for Python < 2.6
raise CertificateError(
"wildcard can only be present in the leftmost label: "
"%s." % repr(dn))
if not sep:
# no right side
# Divergence .format() to percent formatting for Python < 2.6
raise CertificateError(
"sole wildcard without additional labels are not support: "
"%s." % repr(dn))
if dn_leftmost != '*':
# no partial wildcard matching
# Divergence .format() to percent formatting for Python < 2.6
raise CertificateError(
"partial wildcards in leftmost label are not supported: "
"%s." % repr(dn))
hostname_leftmost, sep, hostname_remainder = hostname.partition('.')
if not hostname_leftmost or not sep:
# wildcard must match at least one char
return False
return dn_remainder.lower() == hostname_remainder.lower()
def _inet_paton(ipname):
"""Try to convert an IP address to packed binary form
Supports IPv4 addresses on all platforms and IPv6 on platforms with IPv6
support.
"""
# inet_aton() also accepts strings like '1'
# Divergence: We make sure we have native string type for all python versions
try:
b_ipname = to_bytes(ipname, errors='strict')
except UnicodeError:
raise ValueError("%s must be an all-ascii string." % repr(ipname))
# Set ipname in native string format
if sys.version_info < (3,):
n_ipname = b_ipname
else:
n_ipname = ipname
if n_ipname.count('.') == 3:
try:
return socket.inet_aton(n_ipname)
# Divergence: OSError on late python3. socket.error earlier.
# Null bytes generate ValueError on python3 (we want to raise
# ValueError anyway), TypeError earlier
except (OSError, socket.error, TypeError):
pass
try:
return socket.inet_pton(socket.AF_INET6, n_ipname)
# Divergence: OSError on late python3. socket.error earlier.
# Null bytes generate ValueError on python3 (we want to raise
# ValueError anyway), TypeError earlier
except (OSError, socket.error, TypeError):
# Divergence .format() to percent formatting for Python < 2.6
raise ValueError("%s is neither an IPv4 nor an IP6 "
"address." % repr(ipname))
except AttributeError:
# AF_INET6 not available
pass
# Divergence .format() to percent formatting for Python < 2.6
raise ValueError("%s is not an IPv4 address." % repr(ipname))
def _ipaddress_match(ipname, host_ip):
"""Exact matching of IP addresses.
RFC 6125 explicitly doesn't define an algorithm for this
(section 1.7.2 - "Out of Scope").
"""
# OpenSSL may add a trailing newline to a subjectAltName's IP address
ip = _inet_paton(ipname.rstrip())
return ip == host_ip
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed.
The function matches IP addresses rather than dNSNames if hostname is a
valid ipaddress string. IPv4 addresses are supported on all platforms.
IPv6 addresses are supported on platforms with IPv6 support (AF_INET6
and inet_pton).
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
try:
# Divergence: Deal with hostname as bytes
host_ip = _inet_paton(to_text(hostname, errors='strict'))
except UnicodeError:
# Divergence: Deal with hostname as byte strings.
# IP addresses should be all ascii, so we consider it not
# an IP address if this fails
host_ip = None
except ValueError:
# Not an IP address (common case)
host_ip = None
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if host_ip is None and _dnsname_match(value, hostname):
return
dnsnames.append(value)
elif key == 'IP Address':
if host_ip is not None and _ipaddress_match(value, host_ip):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r doesn't match either of %s" % (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or subjectAltName fields were found")
# End of Python Software Foundation Licensed code
HAS_MATCH_HOSTNAME = True
# This is a dummy cacert provided for macOS since you need at least 1
# ca cert, regardless of validity, for Python on macOS to use the
# keychain functionality in OpenSSL for validating SSL certificates.
# See: http://mercurial.selenic.com/wiki/CACertificates#Mac_OS_X_10.6_and_higher
b_DUMMY_CA_CERT = b"""-----BEGIN CERTIFICATE-----
MIICvDCCAiWgAwIBAgIJAO8E12S7/qEpMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV
BAYTAlVTMRcwFQYDVQQIEw5Ob3J0aCBDYXJvbGluYTEPMA0GA1UEBxMGRHVyaGFt
MRAwDgYDVQQKEwdBbnNpYmxlMB4XDTE0MDMxODIyMDAyMloXDTI0MDMxNTIyMDAy
MlowSTELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYD
VQQHEwZEdXJoYW0xEDAOBgNVBAoTB0Fuc2libGUwgZ8wDQYJKoZIhvcNAQEBBQAD
gY0AMIGJAoGBANtvpPq3IlNlRbCHhZAcP6WCzhc5RbsDqyh1zrkmLi0GwcQ3z/r9
gaWfQBYhHpobK2Tiq11TfraHeNB3/VfNImjZcGpN8Fl3MWwu7LfVkJy3gNNnxkA1
4Go0/LmIvRFHhbzgfuo9NFgjPmmab9eqXJceqZIlz2C8xA7EeG7ku0+vAgMBAAGj
gaswgagwHQYDVR0OBBYEFPnN1nPRqNDXGlCqCvdZchRNi/FaMHkGA1UdIwRyMHCA
FPnN1nPRqNDXGlCqCvdZchRNi/FaoU2kSzBJMQswCQYDVQQGEwJVUzEXMBUGA1UE
CBMOTm9ydGggQ2Fyb2xpbmExDzANBgNVBAcTBkR1cmhhbTEQMA4GA1UEChMHQW5z
aWJsZYIJAO8E12S7/qEpMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEA
MUB80IR6knq9K/tY+hvPsZer6eFMzO3JGkRFBh2kn6JdMDnhYGX7AXVHGflrwNQH
qFy+aenWXsC0ZvrikFxbQnX8GVtDADtVznxOi7XzFw7JOxdsVrpXgSN0eh0aMzvV
zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg=
-----END CERTIFICATE-----
"""
b_PEM_CERT_RE = re.compile(
br'^-----BEGIN CERTIFICATE-----\n.+?-----END CERTIFICATE-----$',
flags=re.M | re.S
)
#
# Exceptions
#
class ConnectionError(Exception):
"""Failed to connect to the server"""
pass
class ProxyError(ConnectionError):
"""Failure to connect because of a proxy"""
pass
class SSLValidationError(ConnectionError):
"""Failure to connect due to SSL validation failing"""
pass
class NoSSLError(SSLValidationError):
"""Needed to connect to an HTTPS url but no ssl library available to verify the certificate"""
pass
class MissingModuleError(Exception):
"""Failed to import 3rd party module required by the caller"""
def __init__(self, message, import_traceback):
super(MissingModuleError, self).__init__(message)
self.import_traceback = import_traceback
# Some environments (Google Compute Engine's CoreOS deploys) do not compile
# against openssl and thus do not have any HTTPS support.
CustomHTTPSConnection = None
CustomHTTPSHandler = None
HTTPSClientAuthHandler = None
UnixHTTPSConnection = None
if hasattr(httplib, 'HTTPSConnection') and hasattr(urllib_request, 'HTTPSHandler'):
class CustomHTTPSConnection(httplib.HTTPSConnection):
def __init__(self, *args, **kwargs):
httplib.HTTPSConnection.__init__(self, *args, **kwargs)
self.context = None
if HAS_SSLCONTEXT:
self.context = self._context
elif HAS_URLLIB3_PYOPENSSLCONTEXT:
self.context = self._context = PyOpenSSLContext(PROTOCOL)
if self.context and self.cert_file:
self.context.load_cert_chain(self.cert_file, self.key_file)
def connect(self):
"Connect to a host on a given (SSL) port."
if hasattr(self, 'source_address'):
sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address)
else:
sock = socket.create_connection((self.host, self.port), self.timeout)
server_hostname = self.host
# Note: self._tunnel_host is not available on py < 2.6 but this code
# isn't used on py < 2.6 (lack of create_connection)
if self._tunnel_host:
self.sock = sock
self._tunnel()
server_hostname = self._tunnel_host
if HAS_SSLCONTEXT or HAS_URLLIB3_PYOPENSSLCONTEXT:
self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname)
elif HAS_URLLIB3_SSL_WRAP_SOCKET:
self.sock = ssl_wrap_socket(sock, keyfile=self.key_file, cert_reqs=ssl.CERT_NONE, certfile=self.cert_file, ssl_version=PROTOCOL,
server_hostname=server_hostname)
else:
self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL)
class CustomHTTPSHandler(urllib_request.HTTPSHandler):
def https_open(self, req):
kwargs = {}
if HAS_SSLCONTEXT:
kwargs['context'] = self._context
return self.do_open(
functools.partial(
CustomHTTPSConnection,
**kwargs
),
req
)
https_request = AbstractHTTPHandler.do_request_
class HTTPSClientAuthHandler(urllib_request.HTTPSHandler):
'''Handles client authentication via cert/key
This is a fairly lightweight extension on HTTPSHandler, and can be used
in place of HTTPSHandler
'''
def __init__(self, client_cert=None, client_key=None, unix_socket=None, **kwargs):
urllib_request.HTTPSHandler.__init__(self, **kwargs)
self.client_cert = client_cert
self.client_key = client_key
self._unix_socket = unix_socket
def https_open(self, req):
return self.do_open(self._build_https_connection, req)
def _build_https_connection(self, host, **kwargs):
kwargs.update({
'cert_file': self.client_cert,
'key_file': self.client_key,
})
try:
kwargs['context'] = self._context
except AttributeError:
pass
if self._unix_socket:
return UnixHTTPSConnection(self._unix_socket)(host, **kwargs)
return httplib.HTTPSConnection(host, **kwargs)
@contextmanager
def unix_socket_patch_httpconnection_connect():
'''Monkey patch ``httplib.HTTPConnection.connect`` to be ``UnixHTTPConnection.connect``
so that when calling ``super(UnixHTTPSConnection, self).connect()`` we get the
correct behavior of creating self.sock for the unix socket
'''
_connect = httplib.HTTPConnection.connect
httplib.HTTPConnection.connect = UnixHTTPConnection.connect
yield
httplib.HTTPConnection.connect = _connect
class UnixHTTPSConnection(httplib.HTTPSConnection):
def __init__(self, unix_socket):
self._unix_socket = unix_socket
def connect(self):
# This method exists simply to ensure we monkeypatch
# httplib.HTTPConnection.connect to call UnixHTTPConnection.connect
with unix_socket_patch_httpconnection_connect():
# Disable pylint check for the super() call. It complains about UnixHTTPSConnection
# being a NoneType because of the initial definition above, but it won't actually
# be a NoneType when this code runs
# pylint: disable=bad-super-call
super(UnixHTTPSConnection, self).connect()
def __call__(self, *args, **kwargs):
httplib.HTTPSConnection.__init__(self, *args, **kwargs)
return self
class UnixHTTPConnection(httplib.HTTPConnection):
'''Handles http requests to a unix socket file'''
def __init__(self, unix_socket):
self._unix_socket = unix_socket
def connect(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
self.sock.connect(self._unix_socket)
except OSError as e:
raise OSError('Invalid Socket File (%s): %s' % (self._unix_socket, e))
if self.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
self.sock.settimeout(self.timeout)
def __call__(self, *args, **kwargs):
httplib.HTTPConnection.__init__(self, *args, **kwargs)
return self
class UnixHTTPHandler(urllib_request.HTTPHandler):
'''Handler for Unix urls'''
def __init__(self, unix_socket, **kwargs):
urllib_request.HTTPHandler.__init__(self, **kwargs)
self._unix_socket = unix_socket
def http_open(self, req):
return self.do_open(UnixHTTPConnection(self._unix_socket), req)
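# Usage sketch (the socket path below is hypothetical): plain HTTP traffic over a
# unix domain socket, e.g.
#     Request(unix_socket='/var/run/docker.sock').get('http://localhost/version')
# is routed through UnixHTTPHandler/UnixHTTPConnection; the hostname in the URL is
# only used for the request line, while the connection itself goes to the socket file.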
class ParseResultDottedDict(dict):
'''
A dict that acts similarly to the ParseResult named tuple from urllib
'''
def __init__(self, *args, **kwargs):
super(ParseResultDottedDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def as_list(self):
'''
Generate a list from this dict, that looks like the ParseResult named tuple
'''
return [self.get(k, None) for k in ('scheme', 'netloc', 'path', 'params', 'query', 'fragment')]
def generic_urlparse(parts):
'''
Returns a dictionary of url parts as parsed by urlparse,
but accounts for the fact that older versions of that
library do not support named attributes (ie. .netloc)
'''
generic_parts = ParseResultDottedDict()
if hasattr(parts, 'netloc'):
# urlparse is newer, just read the fields straight
# from the parts object
generic_parts['scheme'] = parts.scheme
generic_parts['netloc'] = parts.netloc
generic_parts['path'] = parts.path
generic_parts['params'] = parts.params
generic_parts['query'] = parts.query
generic_parts['fragment'] = parts.fragment
generic_parts['username'] = parts.username
generic_parts['password'] = parts.password
hostname = parts.hostname
if hostname and hostname[0] == '[' and '[' in parts.netloc and ']' in parts.netloc:
# Py2.6 doesn't parse IPv6 addresses correctly
hostname = parts.netloc.split(']')[0][1:].lower()
generic_parts['hostname'] = hostname
try:
port = parts.port
except ValueError:
# Py2.6 doesn't parse IPv6 addresses correctly
netloc = parts.netloc.split('@')[-1].split(']')[-1]
if ':' in netloc:
port = netloc.split(':')[1]
if port:
port = int(port)
else:
port = None
generic_parts['port'] = port
else:
# we have to use indexes, and then parse out
# the other parts not supported by indexing
generic_parts['scheme'] = parts[0]
generic_parts['netloc'] = parts[1]
generic_parts['path'] = parts[2]
generic_parts['params'] = parts[3]
generic_parts['query'] = parts[4]
generic_parts['fragment'] = parts[5]
# get the username, password, etc.
try:
netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$')
match = netloc_re.match(parts[1])
auth = match.group(1)
hostname = match.group(2)
port = match.group(3)
if port:
# the capture group for the port will include the ':',
# so remove it and convert the port to an integer
port = int(port[1:])
if auth:
# the capture group above includes the @, so remove it
# and then split it up based on the first ':' found
auth = auth[:-1]
username, password = auth.split(':', 1)
else:
username = password = None
generic_parts['username'] = username
generic_parts['password'] = password
generic_parts['hostname'] = hostname
generic_parts['port'] = port
except Exception:
generic_parts['username'] = None
generic_parts['password'] = None
generic_parts['hostname'] = parts[1]
generic_parts['port'] = None
return generic_parts
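# Illustrative sketch (hypothetical URL): for
#     generic_urlparse(urlparse('https://user:[email protected]:8443/path?q=1'))
# the result behaves like a dict and an object at once: .scheme == 'https',
# .hostname == 'example.com', .port == 8443, .username == 'user',
# .password == 'secret', and .as_list() feeds straight back into urlunparse().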
def extract_pem_certs(b_data):
for match in b_PEM_CERT_RE.finditer(b_data):
yield match.group(0)
def get_response_filename(response):
url = response.geturl()
path = urlparse(url)[2]
filename = os.path.basename(path.rstrip('/')) or None
if filename:
filename = unquote(filename)
return response.headers.get_param('filename', header='content-disposition') or filename
def parse_content_type(response):
if PY2:
get_type = response.headers.gettype
get_param = response.headers.getparam
else:
get_type = response.headers.get_content_type
get_param = response.headers.get_param
content_type = (get_type() or 'application/octet-stream').split(',')[0]
main_type, sub_type = content_type.split('/')
charset = (get_param('charset') or 'utf-8').split(',')[0]
return content_type, main_type, sub_type, charset
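# Illustrative sketch: a response carrying
#     Content-Type: text/html; charset=ISO-8859-1
# yields ('text/html', 'text', 'html', 'ISO-8859-1'); a missing header falls
# back to 'application/octet-stream' and a missing charset to 'utf-8'.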
class RequestWithMethod(urllib_request.Request):
'''
Workaround for using DELETE/PUT/etc with urllib2
Originally contained in library/net_infrastructure/dnsmadeeasy
'''
def __init__(self, url, method, data=None, headers=None, origin_req_host=None, unverifiable=True):
if headers is None:
headers = {}
self._method = method.upper()
urllib_request.Request.__init__(self, url, data, headers, origin_req_host, unverifiable)
def get_method(self):
if self._method:
return self._method
else:
return urllib_request.Request.get_method(self)
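# Minimal usage sketch (example.com is a placeholder):
#     req = RequestWithMethod('https://example.com/item/1', 'delete')
#     req.get_method()  # -> 'DELETE' (upper-cased in __init__)
#     urllib_request.urlopen(req)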
def RedirectHandlerFactory(follow_redirects=None, validate_certs=True, ca_path=None):
"""This is a class factory that closes over the value of
``follow_redirects`` so that the RedirectHandler class has access to
that value without having to use globals, which could otherwise cause problems
where ``open_url`` or ``fetch_url`` are used multiple times in a module.
"""
class RedirectHandler(urllib_request.HTTPRedirectHandler):
"""This is an implementation of a RedirectHandler to match the
functionality provided by httplib2. It will utilize the value of
``follow_redirects`` that is passed into ``RedirectHandlerFactory``
to determine how redirects should be handled in urllib2.
"""
def redirect_request(self, req, fp, code, msg, hdrs, newurl):
if not HAS_SSLCONTEXT:
handler = maybe_add_ssl_handler(newurl, validate_certs, ca_path=ca_path)
if handler:
urllib_request._opener.add_handler(handler)
# Preserve urllib2 compatibility
if follow_redirects == 'urllib2':
return urllib_request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, hdrs, newurl)
# Handle disabled redirects
elif follow_redirects in ['no', 'none', False]:
raise urllib_error.HTTPError(newurl, code, msg, hdrs, fp)
method = req.get_method()
# Handle non-redirect HTTP status or invalid follow_redirects
if follow_redirects in ['all', 'yes', True]:
if code < 300 or code >= 400:
raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp)
elif follow_redirects == 'safe':
if code < 300 or code >= 400 or method not in ('GET', 'HEAD'):
raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp)
else:
raise urllib_error.HTTPError(req.get_full_url(), code, msg, hdrs, fp)
try:
# Python 2-3.3
data = req.get_data()
origin_req_host = req.get_origin_req_host()
except AttributeError:
# Python 3.4+
data = req.data
origin_req_host = req.origin_req_host
# Be lenient with URIs containing a space
newurl = newurl.replace(' ', '%20')
# Support redirects with payload and original headers
if code in (307, 308):
# Preserve payload and headers
headers = req.headers
else:
# Do not preserve payload and filter headers
data = None
headers = dict((k, v) for k, v in req.headers.items()
if k.lower() not in ("content-length", "content-type", "transfer-encoding"))
# http://tools.ietf.org/html/rfc7231#section-6.4.4
if code == 303 and method != 'HEAD':
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if code == 302 and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
if code == 301 and method == 'POST':
method = 'GET'
return RequestWithMethod(newurl,
method=method,
headers=headers,
data=data,
origin_req_host=origin_req_host,
unverifiable=True,
)
return RedirectHandler
def build_ssl_validation_error(hostname, port, paths, exc=None):
'''Intelligently build out the SSLValidationError based on what support
you have installed
'''
msg = [
('Failed to validate the SSL certificate for %s:%s.'
' Make sure your managed systems have a valid CA'
' certificate installed.')
]
if not HAS_SSLCONTEXT:
msg.append('If the website serving the url uses SNI you need'
' python >= 2.7.9 on your managed machine')
msg.append(' (the python executable used (%s) is version: %s)' %
(sys.executable, ''.join(sys.version.splitlines())))
if not HAS_URLLIB3_PYOPENSSLCONTEXT and not HAS_URLLIB3_SSL_WRAP_SOCKET:
msg.append('or you can install the `urllib3`, `pyOpenSSL`,'
' `ndg-httpsclient`, and `pyasn1` python modules')
msg.append('to perform SNI verification in python >= 2.6.')
msg.append('You can use validate_certs=False if you do'
' not need to confirm the server\'s identity but this is'
' unsafe and not recommended.'
' Paths checked for this platform: %s.')
if exc:
msg.append('The exception msg was: %s.' % to_native(exc))
raise SSLValidationError(' '.join(msg) % (hostname, port, ", ".join(paths)))
def atexit_remove_file(filename):
if os.path.exists(filename):
try:
os.unlink(filename)
except Exception:
# just ignore if we cannot delete, things should be ok
pass
class SSLValidationHandler(urllib_request.BaseHandler):
'''
A custom handler class for SSL validation.
Based on:
http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python
http://techknack.net/python-urllib2-handlers/
'''
CONNECT_COMMAND = "CONNECT %s:%s HTTP/1.0\r\n"
def __init__(self, hostname, port, ca_path=None):
self.hostname = hostname
self.port = port
self.ca_path = ca_path
def get_ca_certs(self):
# tries to find a valid CA cert in one of the
# standard locations for the current distribution
ca_certs = []
cadata = bytearray()
paths_checked = []
if self.ca_path:
paths_checked = [self.ca_path]
with open(to_bytes(self.ca_path, errors='surrogate_or_strict'), 'rb') as f:
if HAS_SSLCONTEXT:
for b_pem in extract_pem_certs(f.read()):
cadata.extend(
ssl.PEM_cert_to_DER_cert(
to_native(b_pem, errors='surrogate_or_strict')
)
)
return self.ca_path, cadata, paths_checked
if not HAS_SSLCONTEXT:
paths_checked.append('/etc/ssl/certs')
system = to_text(platform.system(), errors='surrogate_or_strict')
# build a list of paths to check for .crt/.pem files
# based on the platform type
if system == u'Linux':
paths_checked.append('/etc/pki/ca-trust/extracted/pem')
paths_checked.append('/etc/pki/tls/certs')
paths_checked.append('/usr/share/ca-certificates/cacert.org')
elif system == u'FreeBSD':
paths_checked.append('/usr/local/share/certs')
elif system == u'OpenBSD':
paths_checked.append('/etc/ssl')
elif system == u'NetBSD':
ca_certs.append('/etc/openssl/certs')
elif system == u'SunOS':
paths_checked.append('/opt/local/etc/openssl/certs')
# fall back to a user-deployed cert in a standard
# location if the OS platform one is not available
paths_checked.append('/etc/ansible')
tmp_path = None
if not HAS_SSLCONTEXT:
tmp_fd, tmp_path = tempfile.mkstemp()
atexit.register(atexit_remove_file, tmp_path)
# Write the dummy ca cert if we are running on macOS
if system == u'Darwin':
if HAS_SSLCONTEXT:
cadata.extend(
ssl.PEM_cert_to_DER_cert(
to_native(b_DUMMY_CA_CERT, errors='surrogate_or_strict')
)
)
else:
os.write(tmp_fd, b_DUMMY_CA_CERT)
# Default Homebrew path for OpenSSL certs
paths_checked.append('/usr/local/etc/openssl')
# for all of the paths, find any .crt or .pem files
# and compile them into single temp file for use
# in the ssl check to speed up the test
for path in paths_checked:
if os.path.exists(path) and os.path.isdir(path):
dir_contents = os.listdir(path)
for f in dir_contents:
full_path = os.path.join(path, f)
if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt', '.pem'):
try:
if full_path not in LOADED_VERIFY_LOCATIONS:
with open(full_path, 'rb') as cert_file:
b_cert = cert_file.read()
if HAS_SSLCONTEXT:
try:
for b_pem in extract_pem_certs(b_cert):
cadata.extend(
ssl.PEM_cert_to_DER_cert(
to_native(b_pem, errors='surrogate_or_strict')
)
)
except Exception:
continue
else:
os.write(tmp_fd, b_cert)
os.write(tmp_fd, b'\n')
except (OSError, IOError):
pass
if HAS_SSLCONTEXT:
default_verify_paths = ssl.get_default_verify_paths()
paths_checked[:0] = [default_verify_paths.capath]
else:
os.close(tmp_fd)
return (tmp_path, cadata, paths_checked)
def validate_proxy_response(self, response, valid_codes=None):
'''
make sure we get back a valid code from the proxy
'''
valid_codes = [200] if valid_codes is None else valid_codes
try:
(http_version, resp_code, msg) = re.match(br'(HTTP/\d\.\d) (\d\d\d) (.*)', response).groups()
if int(resp_code) not in valid_codes:
raise Exception
except Exception:
raise ProxyError('Connection to proxy failed')
def detect_no_proxy(self, url):
'''
Detect if the 'no_proxy' environment variable is set and honor those locations.
'''
env_no_proxy = os.environ.get('no_proxy')
if env_no_proxy:
env_no_proxy = env_no_proxy.split(',')
netloc = urlparse(url).netloc
for host in env_no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# Our requested URL matches something in no_proxy, so don't
# use the proxy for this
return False
return True
def make_context(self, cafile, cadata):
cafile = self.ca_path or cafile
if self.ca_path:
cadata = None
else:
cadata = cadata or None
if HAS_SSLCONTEXT:
context = create_default_context(cafile=cafile)
elif HAS_URLLIB3_PYOPENSSLCONTEXT:
context = PyOpenSSLContext(PROTOCOL)
else:
raise NotImplementedError('Host libraries are too old to support creating an sslcontext')
if cafile or cadata:
context.load_verify_locations(cafile=cafile, cadata=cadata)
return context
def http_request(self, req):
tmp_ca_cert_path, cadata, paths_checked = self.get_ca_certs()
# Detect if 'no_proxy' environment variable is set and if our URL is included
use_proxy = self.detect_no_proxy(req.get_full_url())
https_proxy = os.environ.get('https_proxy')
context = None
try:
context = self.make_context(tmp_ca_cert_path, cadata)
except NotImplementedError:
# We'll make do with no context below
pass
try:
if use_proxy and https_proxy:
proxy_parts = generic_urlparse(urlparse(https_proxy))
port = proxy_parts.get('port') or 443
proxy_hostname = proxy_parts.get('hostname', None)
if proxy_hostname is None or proxy_parts.get('scheme') == '':
raise ProxyError("Failed to parse https_proxy environment variable."
" Please make sure you export https proxy as 'https_proxy=<SCHEME>://<IP_ADDRESS>:<PORT>'")
s = socket.create_connection((proxy_hostname, port))
if proxy_parts.get('scheme') == 'http':
s.sendall(to_bytes(self.CONNECT_COMMAND % (self.hostname, self.port), errors='surrogate_or_strict'))
if proxy_parts.get('username'):
credentials = "%s:%s" % (proxy_parts.get('username', ''), proxy_parts.get('password', ''))
s.sendall(b'Proxy-Authorization: Basic %s\r\n' % base64.b64encode(to_bytes(credentials, errors='surrogate_or_strict')).strip())
s.sendall(b'\r\n')
connect_result = b""
while connect_result.find(b"\r\n\r\n") <= 0:
connect_result += s.recv(4096)
# 128 kilobytes of headers should be enough for everyone.
if len(connect_result) > 131072:
raise ProxyError('Proxy sent too verbose headers. Only 128KiB allowed.')
self.validate_proxy_response(connect_result)
if context:
ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
elif HAS_URLLIB3_SSL_WRAP_SOCKET:
ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname)
else:
ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
match_hostname(ssl_s.getpeercert(), self.hostname)
else:
raise ProxyError('Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme'))
else:
s = socket.create_connection((self.hostname, self.port))
if context:
ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
elif HAS_URLLIB3_SSL_WRAP_SOCKET:
ssl_s = ssl_wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL, server_hostname=self.hostname)
else:
ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
match_hostname(ssl_s.getpeercert(), self.hostname)
# close the ssl connection
# ssl_s.unwrap()
s.close()
except (ssl.SSLError, CertificateError) as e:
build_ssl_validation_error(self.hostname, self.port, paths_checked, e)
except socket.error as e:
raise ConnectionError('Failed to connect to %s at port %s: %s' % (self.hostname, self.port, to_native(e)))
return req
https_request = http_request
def maybe_add_ssl_handler(url, validate_certs, ca_path=None):
parsed = generic_urlparse(urlparse(url))
if parsed.scheme == 'https' and validate_certs:
if not HAS_SSL:
raise NoSSLError('SSL validation is not available in your version of python. You can use validate_certs=False,'
' however this is unsafe and not recommended')
# create the SSL validation handler and
# add it to the list of handlers
return SSLValidationHandler(parsed.hostname, parsed.port or 443, ca_path=ca_path)
def getpeercert(response, binary_form=False):
""" Attempt to get the peer certificate of the response from urlopen. """
# The response object from urlopen() differs between Python 2 and 3
if PY3:
socket = response.fp.raw._sock
else:
socket = response.fp._sock.fp._sock
try:
return socket.getpeercert(binary_form)
except AttributeError:
pass # Not HTTPS
def get_channel_binding_cert_hash(certificate_der):
""" Gets the channel binding app data for a TLS connection using the peer cert. """
if not HAS_CRYPTOGRAPHY:
return
# Logic documented in RFC 5929 section 4 https://tools.ietf.org/html/rfc5929#section-4
cert = x509.load_der_x509_certificate(certificate_der, default_backend())
hash_algorithm = None
try:
hash_algorithm = cert.signature_hash_algorithm
except UnsupportedAlgorithm:
pass
# If the signature hash algorithm is unknown/unsupported or md5/sha1 we must use SHA256.
if not hash_algorithm or hash_algorithm.name in ['md5', 'sha1']:
hash_algorithm = hashes.SHA256()
digest = hashes.Hash(hash_algorithm, default_backend())
digest.update(certificate_der)
return digest.finalize()
def rfc2822_date_string(timetuple, zone='-0000'):
"""Accepts a timetuple and optional zone which defaults to ``-0000``
and returns a date string as specified by RFC 2822, e.g.:
Fri, 09 Nov 2001 01:08:47 -0000
Copied from email.utils.formatdate and modified for separate use
"""
return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timetuple[6]],
timetuple[2],
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timetuple[1] - 1],
timetuple[0], timetuple[3], timetuple[4], timetuple[5],
zone)
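# Example (pure function): time.gmtime(0) is Thursday, 1 Jan 1970, so
#     rfc2822_date_string(time.gmtime(0), 'GMT')
# returns 'Thu, 01 Jan 1970 00:00:00 GMT'.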
class Request:
def __init__(self, headers=None, use_proxy=True, force=False, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=False,
follow_redirects='urllib2', client_cert=None, client_key=None, cookies=None, unix_socket=None,
ca_path=None):
"""This class works somewhat similarly to the ``Session`` class of from requests
by defining a cookiejar that an be used across requests as well as cascaded defaults that
can apply to repeated requests
For documentation of params, see ``Request.open``
>>> from ansible.module_utils.urls import Request
>>> r = Request()
>>> r.open('GET', 'http://httpbin.org/cookies/set?k1=v1').read()
'{\n "cookies": {\n "k1": "v1"\n }\n}\n'
>>> r = Request(url_username='user', url_password='passwd')
>>> r.open('GET', 'http://httpbin.org/basic-auth/user/passwd').read()
'{\n "authenticated": true, \n "user": "user"\n}\n'
>>> r = Request(headers=dict(foo='bar'))
>>> r.open('GET', 'http://httpbin.org/get', headers=dict(baz='qux')).read()
"""
self.headers = headers or {}
if not isinstance(self.headers, dict):
raise ValueError("headers must be a dict: %r" % self.headers)
self.use_proxy = use_proxy
self.force = force
self.timeout = timeout
self.validate_certs = validate_certs
self.url_username = url_username
self.url_password = url_password
self.http_agent = http_agent
self.force_basic_auth = force_basic_auth
self.follow_redirects = follow_redirects
self.client_cert = client_cert
self.client_key = client_key
self.unix_socket = unix_socket
self.ca_path = ca_path
if isinstance(cookies, cookiejar.CookieJar):
self.cookies = cookies
else:
self.cookies = cookiejar.CookieJar()
def _fallback(self, value, fallback):
if value is None:
return fallback
return value
def open(self, method, url, data=None, headers=None, use_proxy=None,
force=None, last_mod_time=None, timeout=None, validate_certs=None,
url_username=None, url_password=None, http_agent=None,
force_basic_auth=None, follow_redirects=None,
client_cert=None, client_key=None, cookies=None, use_gssapi=False,
unix_socket=None, ca_path=None, unredirected_headers=None):
"""
Sends a request via HTTP(S) or FTP using urllib2 (Python2) or urllib (Python3)
Does not require the module environment
Returns :class:`HTTPResponse` object.
:arg method: method for the request
:arg url: URL to request
:kwarg data: (optional) bytes, or file-like object to send
in the body of the request
:kwarg headers: (optional) Dictionary of HTTP Headers to send with the
request
:kwarg use_proxy: (optional) Boolean of whether or not to use proxy
:kwarg force: (optional) Boolean of whether or not to set `cache-control: no-cache` header
:kwarg last_mod_time: (optional) Datetime object to use when setting If-Modified-Since header
:kwarg timeout: (optional) How long to wait for the server to send
data before giving up, as a float
:kwarg validate_certs: (optional) Boolean that controls whether we verify
the server's TLS certificate
:kwarg url_username: (optional) String of the user to use when authenticating
:kwarg url_password: (optional) String of the password to use when authenticating
:kwarg http_agent: (optional) String of the User-Agent to use in the request
:kwarg force_basic_auth: (optional) Boolean determining if auth header should be sent in the initial request
:kwarg follow_redirects: (optional) String of urllib2, all/yes, safe, none to determine how redirects are
followed, see RedirectHandlerFactory for more information
:kwarg client_cert: (optional) PEM formatted certificate chain file to be used for SSL client authentication.
This file can also include the key as well, and if the key is included, client_key is not required
:kwarg client_key: (optional) PEM formatted file that contains your private key to be used for SSL client
authentication. If client_cert contains both the certificate and key, this option is not required
:kwarg cookies: (optional) CookieJar object to send with the
request
:kwarg use_gssapi: (optional) Use GSSAPI handler of requests.
:kwarg unix_socket: (optional) String of file system path to unix socket file to use when establishing
connection to the provided url
:kwarg ca_path: (optional) String of file system path to CA cert bundle to use
:kwarg unredirected_headers: (optional) A list of headers to not attach on a redirected request
:returns: HTTPResponse. Added in Ansible 2.9
"""
method = method.upper()
if headers is None:
headers = {}
elif not isinstance(headers, dict):
raise ValueError("headers must be a dict")
headers = dict(self.headers, **headers)
use_proxy = self._fallback(use_proxy, self.use_proxy)
force = self._fallback(force, self.force)
timeout = self._fallback(timeout, self.timeout)
validate_certs = self._fallback(validate_certs, self.validate_certs)
url_username = self._fallback(url_username, self.url_username)
url_password = self._fallback(url_password, self.url_password)
http_agent = self._fallback(http_agent, self.http_agent)
force_basic_auth = self._fallback(force_basic_auth, self.force_basic_auth)
follow_redirects = self._fallback(follow_redirects, self.follow_redirects)
client_cert = self._fallback(client_cert, self.client_cert)
client_key = self._fallback(client_key, self.client_key)
cookies = self._fallback(cookies, self.cookies)
unix_socket = self._fallback(unix_socket, self.unix_socket)
ca_path = self._fallback(ca_path, self.ca_path)
handlers = []
if unix_socket:
handlers.append(UnixHTTPHandler(unix_socket))
ssl_handler = maybe_add_ssl_handler(url, validate_certs, ca_path=ca_path)
if ssl_handler and not HAS_SSLCONTEXT:
handlers.append(ssl_handler)
parsed = generic_urlparse(urlparse(url))
if parsed.scheme != 'ftp':
username = url_username
password = url_password
if username:
netloc = parsed.netloc
elif '@' in parsed.netloc:
credentials, netloc = parsed.netloc.split('@', 1)
if ':' in credentials:
username, password = credentials.split(':', 1)
else:
username = credentials
password = ''
parsed_list = parsed.as_list()
parsed_list[1] = netloc
# reconstruct url without credentials
url = urlunparse(parsed_list)
if use_gssapi:
if HTTPGSSAPIAuthHandler:
handlers.append(HTTPGSSAPIAuthHandler(username, password))
else:
imp_err_msg = missing_required_lib('gssapi', reason='for use_gssapi=True',
url='https://pypi.org/project/gssapi/')
raise MissingModuleError(imp_err_msg, import_traceback=GSSAPI_IMP_ERR)
elif username and not force_basic_auth:
passman = urllib_request.HTTPPasswordMgrWithDefaultRealm()
# this creates a password manager
passman.add_password(None, netloc, username, password)
# because we have put None at the start it will always
# use this username/password combination for urls
# under the given netloc
authhandler = urllib_request.HTTPBasicAuthHandler(passman)
digest_authhandler = urllib_request.HTTPDigestAuthHandler(passman)
# create the AuthHandler
handlers.append(authhandler)
handlers.append(digest_authhandler)
elif username and force_basic_auth:
headers["Authorization"] = basic_auth_header(username, password)
else:
try:
rc = netrc.netrc(os.environ.get('NETRC'))
login = rc.authenticators(parsed.hostname)
except IOError:
login = None
if login:
username, _, password = login
if username and password:
headers["Authorization"] = basic_auth_header(username, password)
if not use_proxy:
proxyhandler = urllib_request.ProxyHandler({})
handlers.append(proxyhandler)
context = None
if HAS_SSLCONTEXT and not validate_certs:
# In 2.7.9, the default context validates certificates
context = SSLContext(ssl.PROTOCOL_SSLv23)
if ssl.OP_NO_SSLv2:
context.options |= ssl.OP_NO_SSLv2
context.options |= ssl.OP_NO_SSLv3
context.verify_mode = ssl.CERT_NONE
context.check_hostname = False
handlers.append(HTTPSClientAuthHandler(client_cert=client_cert,
client_key=client_key,
context=context,
unix_socket=unix_socket))
elif client_cert or unix_socket:
handlers.append(HTTPSClientAuthHandler(client_cert=client_cert,
client_key=client_key,
unix_socket=unix_socket))
if ssl_handler and HAS_SSLCONTEXT and validate_certs:
tmp_ca_path, cadata, paths_checked = ssl_handler.get_ca_certs()
try:
context = ssl_handler.make_context(tmp_ca_path, cadata)
except NotImplementedError:
pass
# pre-2.6 versions of python cannot use the custom https
# handler, since the socket class is lacking create_connection.
# Some python builds lack HTTPS support.
if hasattr(socket, 'create_connection') and CustomHTTPSHandler:
kwargs = {}
if HAS_SSLCONTEXT:
kwargs['context'] = context
handlers.append(CustomHTTPSHandler(**kwargs))
handlers.append(RedirectHandlerFactory(follow_redirects, validate_certs, ca_path=ca_path))
# add some nicer cookie handling
if cookies is not None:
handlers.append(urllib_request.HTTPCookieProcessor(cookies))
opener = urllib_request.build_opener(*handlers)
urllib_request.install_opener(opener)
data = to_bytes(data, nonstring='passthru')
request = RequestWithMethod(url, method, data)
# add the custom agent header, to help prevent issues
# with sites that block the default urllib agent string
if http_agent:
request.add_header('User-agent', http_agent)
# Cache control
# Either we directly force a cache refresh
if force:
request.add_header('cache-control', 'no-cache')
# or we do it if the original is more recent than our copy
elif last_mod_time:
tstamp = rfc2822_date_string(last_mod_time.timetuple(), 'GMT')
request.add_header('If-Modified-Since', tstamp)
# user defined headers now, which may override things we've set above
unredirected_headers = [h.lower() for h in (unredirected_headers or [])]
for header in headers:
if header.lower() in unredirected_headers:
request.add_unredirected_header(header, headers[header])
else:
request.add_header(header, headers[header])
return urllib_request.urlopen(request, None, timeout)
def get(self, url, **kwargs):
r"""Sends a GET request. Returns :class:`HTTPResponse` object.
:arg url: URL to request
:kwarg \*\*kwargs: Optional arguments that ``open`` takes.
:returns: HTTPResponse
"""
return self.open('GET', url, **kwargs)
def options(self, url, **kwargs):
r"""Sends a OPTIONS request. Returns :class:`HTTPResponse` object.
:arg url: URL to request
:kwarg \*\*kwargs: Optional arguments that ``open`` takes.
:returns: HTTPResponse
"""
return self.open('OPTIONS', url, **kwargs)
def head(self, url, **kwargs):
r"""Sends a HEAD request. Returns :class:`HTTPResponse` object.
:arg url: URL to request
:kwarg \*\*kwargs: Optional arguments that ``open`` takes.
:returns: HTTPResponse
"""
return self.open('HEAD', url, **kwargs)
def post(self, url, data=None, **kwargs):
r"""Sends a POST request. Returns :class:`HTTPResponse` object.
:arg url: URL to request.
:kwarg data: (optional) bytes, or file-like object to send in the body of the request.
:kwarg \*\*kwargs: Optional arguments that ``open`` takes.
:returns: HTTPResponse
"""
return self.open('POST', url, data=data, **kwargs)
def put(self, url, data=None, **kwargs):
r"""Sends a PUT request. Returns :class:`HTTPResponse` object.
:arg url: URL to request.
:kwarg data: (optional) bytes, or file-like object to send in the body of the request.
:kwarg \*\*kwargs: Optional arguments that ``open`` takes.
:returns: HTTPResponse
"""
return self.open('PUT', url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
r"""Sends a PATCH request. Returns :class:`HTTPResponse` object.
:arg url: URL to request.
:kwarg data: (optional) bytes, or file-like object to send in the body of the request.
:kwarg \*\*kwargs: Optional arguments that ``open`` takes.
:returns: HTTPResponse
"""
return self.open('PATCH', url, data=data, **kwargs)
def delete(self, url, **kwargs):
r"""Sends a DELETE request. Returns :class:`HTTPResponse` object.
:arg url: URL to request
:kwarg \*\*kwargs: Optional arguments that ``open`` takes.
:returns: HTTPResponse
"""
return self.open('DELETE', url, **kwargs)
def open_url(url, data=None, headers=None, method=None, use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None,
force_basic_auth=False, follow_redirects='urllib2',
client_cert=None, client_key=None, cookies=None,
use_gssapi=False, unix_socket=None, ca_path=None,
unredirected_headers=None):
'''
Sends a request via HTTP(S) or FTP using urllib2 (Python2) or urllib (Python3)
Does not require the module environment
'''
method = method or ('POST' if data else 'GET')
return Request().open(method, url, data=data, headers=headers, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth, follow_redirects=follow_redirects,
client_cert=client_cert, client_key=client_key, cookies=cookies,
use_gssapi=use_gssapi, unix_socket=unix_socket, ca_path=ca_path,
unredirected_headers=unredirected_headers)
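# Minimal usage sketch (URL and header values are placeholders):
#     resp = open_url('https://example.com/api', headers={'Accept': 'application/json'},
#                     timeout=5)
#     status, body = resp.code, resp.read()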
def prepare_multipart(fields):
"""Takes a mapping, and prepares a multipart/form-data body
:arg fields: Mapping
:returns: tuple of (content_type, body) where ``content_type`` is
the ``multipart/form-data`` ``Content-Type`` header including
``boundary`` and ``body`` is the prepared bytestring body
Payload content from a file will be base64 encoded and will include
the appropriate ``Content-Transfer-Encoding`` and ``Content-Type``
headers.
Example:
{
"file1": {
"filename": "/bin/true",
"mime_type": "application/octet-stream"
},
"file2": {
"content": "text based file content",
"filename": "fake.txt",
"mime_type": "text/plain",
},
"text_form_field": "value"
}
"""
if not isinstance(fields, Mapping):
raise TypeError(
'Mapping is required, cannot be type %s' % fields.__class__.__name__
)
m = email.mime.multipart.MIMEMultipart('form-data')
for field, value in sorted(fields.items()):
if isinstance(value, string_types):
main_type = 'text'
sub_type = 'plain'
content = value
filename = None
elif isinstance(value, Mapping):
filename = value.get('filename')
content = value.get('content')
if not any((filename, content)):
raise ValueError('at least one of filename or content must be provided')
mime = value.get('mime_type')
if not mime:
try:
mime = mimetypes.guess_type(filename or '', strict=False)[0] or 'application/octet-stream'
except Exception:
mime = 'application/octet-stream'
main_type, sep, sub_type = mime.partition('/')
else:
raise TypeError(
'value must be a string, or mapping, cannot be type %s' % value.__class__.__name__
)
if not content and filename:
with open(to_bytes(filename, errors='surrogate_or_strict'), 'rb') as f:
part = email.mime.application.MIMEApplication(f.read())
del part['Content-Type']
part.add_header('Content-Type', '%s/%s' % (main_type, sub_type))
else:
part = email.mime.nonmultipart.MIMENonMultipart(main_type, sub_type)
part.set_payload(to_bytes(content))
part.add_header('Content-Disposition', 'form-data')
del part['MIME-Version']
part.set_param(
'name',
field,
header='Content-Disposition'
)
if filename:
part.set_param(
'filename',
to_native(os.path.basename(filename)),
header='Content-Disposition'
)
m.attach(part)
if PY3:
# Ensure headers are not split over multiple lines
# The HTTP policy also uses CRLF by default
b_data = m.as_bytes(policy=email.policy.HTTP)
else:
# Py2
# We cannot just call ``as_string`` since it provides no way
# to specify ``maxheaderlen``
fp = cStringIO() # cStringIO seems to be required here
# Ensure headers are not split over multiple lines
g = email.generator.Generator(fp, maxheaderlen=0)
g.flatten(m)
# ``fix_eols`` switches from ``\n`` to ``\r\n``
b_data = email.utils.fix_eols(fp.getvalue())
del m
headers, sep, b_content = b_data.partition(b'\r\n\r\n')
del b_data
if PY3:
parser = email.parser.BytesHeaderParser().parsebytes
else:
# Py2
parser = email.parser.HeaderParser().parsestr
return (
parser(headers)['content-type'], # Message converts to native strings
b_content
)
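# Usage sketch (field names and the upload URL are hypothetical):
#     content_type, body = prepare_multipart({
#         'namespace': 'ansible',
#         'file': {'filename': '/tmp/artifact.tar.gz'},
#     })
#     open_url('https://example.com/upload', method='POST', data=body,
#              headers={'Content-Type': content_type})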
#
# Module-related functions
#
def basic_auth_header(username, password):
"""Takes a username and password and returns a byte string suitable for
using as value of an Authorization header to do basic auth.
"""
return b"Basic %s" % base64.b64encode(to_bytes("%s:%s" % (username, password), errors='surrogate_or_strict'))
def url_argument_spec():
'''
Creates an argument spec that can be used with any module
that will be requesting content via urllib/urllib2
'''
return dict(
url=dict(type='str'),
force=dict(type='bool', default=False),
http_agent=dict(type='str', default='ansible-httpget'),
use_proxy=dict(type='bool', default=True),
validate_certs=dict(type='bool', default=True),
url_username=dict(type='str'),
url_password=dict(type='str', no_log=True),
force_basic_auth=dict(type='bool', default=False),
client_cert=dict(type='path'),
client_key=dict(type='path'),
use_gssapi=dict(type='bool', default=False),
)
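# Typical pattern (sketch; 'dest' is a hypothetical module-specific option):
#     argument_spec = url_argument_spec()
#     argument_spec.update(dest=dict(type='path'))
#     module = AnsibleModule(argument_spec=argument_spec)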
def fetch_url(module, url, data=None, headers=None, method=None,
use_proxy=True, force=False, last_mod_time=None, timeout=10,
use_gssapi=False, unix_socket=None, ca_path=None, cookies=None, unredirected_headers=None):
"""Sends a request via HTTP(S) or FTP (needs the module as parameter)
:arg module: The AnsibleModule (used to get username, password, etc.).
:arg url: The url to use.
:kwarg data: The data to be sent (in case of POST/PUT).
:kwarg headers: A dict with the request headers.
:kwarg method: "POST", "PUT", etc.
:kwarg boolean use_proxy: Default: True
:kwarg boolean force: If True: Do not get a cached copy (Default: False)
:kwarg last_mod_time: Default: None
:kwarg int timeout: Default: 10
:kwarg boolean use_gssapi: Default: False
:kwarg unix_socket: (optional) String of file system path to unix socket file to use when establishing
connection to the provided url
:kwarg ca_path: (optional) String of file system path to CA cert bundle to use
:kwarg cookies: (optional) CookieJar object to send with the request
:kwarg unredirected_headers: (optional) A list of headers to not attach on a redirected request
:returns: A tuple of (**response**, **info**). Use ``response.read()`` to read the data.
The **info** contains the 'status' and other meta data. When an HTTPError (status >= 400)
occurs, ``info['body']`` contains the error response data::
Example::
data={...}
resp, info = fetch_url(module,
"http://example.com",
data=module.jsonify(data),
headers={'Content-type': 'application/json'},
method="POST")
status_code = info["status"]
body = resp.read()
if status_code >= 400 :
body = info['body']
"""
if not HAS_URLPARSE:
module.fail_json(msg='urlparse is not installed')
# ensure we use proper tempdir
old_tempdir = tempfile.tempdir
tempfile.tempdir = module.tmpdir
# Get validate_certs from the module params
validate_certs = module.params.get('validate_certs', True)
username = module.params.get('url_username', '')
password = module.params.get('url_password', '')
http_agent = module.params.get('http_agent', 'ansible-httpget')
force_basic_auth = module.params.get('force_basic_auth', '')
follow_redirects = module.params.get('follow_redirects', 'urllib2')
client_cert = module.params.get('client_cert')
client_key = module.params.get('client_key')
use_gssapi = module.params.get('use_gssapi', use_gssapi)
if not isinstance(cookies, cookiejar.CookieJar):
cookies = cookiejar.LWPCookieJar()
r = None
info = dict(url=url, status=-1)
try:
r = open_url(url, data=data, headers=headers, method=method,
use_proxy=use_proxy, force=force, last_mod_time=last_mod_time, timeout=timeout,
validate_certs=validate_certs, url_username=username,
url_password=password, http_agent=http_agent, force_basic_auth=force_basic_auth,
follow_redirects=follow_redirects, client_cert=client_cert,
client_key=client_key, cookies=cookies, use_gssapi=use_gssapi,
unix_socket=unix_socket, ca_path=ca_path, unredirected_headers=unredirected_headers)
# Lowercase keys, to conform to py2 behavior, so that py3 and py2 are predictable
info.update(dict((k.lower(), v) for k, v in r.info().items()))
# Don't be lossy, append header values for duplicate headers
# In Py2 there is nothing that needs to be done; py2 does this for us
if PY3:
temp_headers = {}
for name, value in r.headers.items():
# The same as above, lower case keys to match py2 behavior, and create more consistent results
name = name.lower()
if name in temp_headers:
temp_headers[name] = ', '.join((temp_headers[name], value))
else:
temp_headers[name] = value
info.update(temp_headers)
# parse the cookies into a nice dictionary
cookie_list = []
cookie_dict = dict()
# Python sorts cookies in order of most specific (ie. longest) path first. See ``CookieJar._cookie_attrs``
# Cookies with the same path are reversed from response order.
# This code makes no assumptions about that, and accepts the order given by python
for cookie in cookies:
cookie_dict[cookie.name] = cookie.value
cookie_list.append((cookie.name, cookie.value))
info['cookies_string'] = '; '.join('%s=%s' % c for c in cookie_list)
info['cookies'] = cookie_dict
# finally update the result with a message about the fetch
info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), url=r.geturl(), status=r.code))
except NoSSLError as e:
distribution = get_distribution()
if distribution is not None and distribution.lower() == 'redhat':
module.fail_json(msg='%s. You can also install python-ssl from EPEL' % to_native(e), **info)
else:
module.fail_json(msg='%s' % to_native(e), **info)
except (ConnectionError, ValueError) as e:
module.fail_json(msg=to_native(e), **info)
except MissingModuleError as e:
module.fail_json(msg=to_text(e), exception=e.import_traceback)
except urllib_error.HTTPError as e:
r = e
try:
if e.fp is None:
# Certain HTTPError objects may not have the ability to call ``.read()`` on Python 3
# This is not handled gracefully in Python 3, and instead an exception is raised from
# tempfile, due to ``urllib.response.addinfourl`` not being initialized
raise AttributeError
body = e.read()
except AttributeError:
body = ''
else:
e.close()
# Try to add exception info to the output but don't fail if we can't
try:
# Lowercase keys, to conform to py2 behavior, so that py3 and py2 are predictable
info.update(dict((k.lower(), v) for k, v in e.info().items()))
except Exception:
pass
info.update({'msg': to_native(e), 'body': body, 'status': e.code})
except urllib_error.URLError as e:
code = int(getattr(e, 'code', -1))
info.update(dict(msg="Request failed: %s" % to_native(e), status=code))
except socket.error as e:
info.update(dict(msg="Connection failure: %s" % to_native(e), status=-1))
except httplib.BadStatusLine as e:
info.update(dict(msg="Connection failure: connection was closed before a valid response was received: %s" % to_native(e.line), status=-1))
except Exception as e:
info.update(dict(msg="An unknown error occurred: %s" % to_native(e), status=-1),
exception=traceback.format_exc())
finally:
tempfile.tempdir = old_tempdir
return r, info
def fetch_file(module, url, data=None, headers=None, method=None,
use_proxy=True, force=False, last_mod_time=None, timeout=10,
unredirected_headers=None):
'''Download and save a file via HTTP(S) or FTP (needs the module as parameter).
This is basically a wrapper around fetch_url().
:arg module: The AnsibleModule (used to get username, password, etc.).
:arg url: The url to use.
:kwarg data: The data to be sent (in case of POST/PUT).
:kwarg headers: A dict with the request headers.
:kwarg method: "POST", "PUT", etc.
:kwarg boolean use_proxy: Default: True
:kwarg boolean force: If True: Do not get a cached copy (Default: False)
:kwarg last_mod_time: Default: None
:kwarg int timeout: Default: 10
:kwarg unredirected_headers: (optional) A list of headers to not attach on a redirected request
:returns: A string, the path to the downloaded file.
'''
# download file
bufsize = 65536
file_name, file_ext = os.path.splitext(str(url.rsplit('/', 1)[1]))
fetch_temp_file = tempfile.NamedTemporaryFile(dir=module.tmpdir, prefix=file_name, suffix=file_ext, delete=False)
module.add_cleanup_file(fetch_temp_file.name)
try:
rsp, info = fetch_url(module, url, data, headers, method, use_proxy, force, last_mod_time, timeout,
unredirected_headers=unredirected_headers)
if not rsp:
module.fail_json(msg="Failure downloading %s, %s" % (url, info['msg']))
data = rsp.read(bufsize)
while data:
fetch_temp_file.write(data)
data = rsp.read(bufsize)
fetch_temp_file.close()
except Exception as e:
module.fail_json(msg="Failure downloading %s, %s" % (url, to_native(e)))
return fetch_temp_file.name
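# Usage sketch (inside a module; the URL is a placeholder):
#     path = fetch_file(module, 'https://example.com/pkg.tar.gz', timeout=30)
# The file is written under module.tmpdir and registered for cleanup above.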
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,663 |
reboot module hangs when the verbosity is set to connection debug (-vvvv)
|
### Summary
Even though the reboot module performs as expected at verbosity 3 (-vvv), when I set it to 4 (-vvvv) it hangs. I tested 2.9.16, 2.9.27 and 2.12.0.
I specified `become_method=su`. With `become_method=sudo`, it timed out after 5 minutes and the job then proceeded as expected.
### Issue Type
Bug Report
### Component Name
reboot
### Ansible Version
```console
### RHEL 8.2
$ ansible --version
ansible 2.9.16
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/sugimura/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.6/site-packages/ansible
executable location = /usr/bin/ansible
python version = 3.6.8 (default, Dec 5 2019, 15:45:45) [GCC 8.3.1 20191121 (Red Hat 8.3.1-5)]
### RHEL 8.5
$ ansible --version
ansible [core 2.12.0]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/sugimura/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
ansible collection location = /home/sugimura/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.8.8 (default, Aug 11 2021, 06:52:42) [GCC 8.5.0 20210514 (Red Hat 8.5.0-3)]
jinja version = 2.10.3
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
$
(nothing special)
```
### OS / Environment
RHEL 8.2 and 8.5 as control nodes. The managed nodes I tested are also RHEL 8.2 and 8.5.
### Steps to Reproduce
### playbook.yml
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
---
- hosts: all
gather_facts: false
become: true
tasks:
- name: reboot
reboot:
post_reboot_delay: 30
```
### inventory
```
$ cat inventory
### RHEL 8.2
192.168.0.155 ansible_become_method=su
#192.168.0.155 ansible_become_method=sudo
### RHEL 8.5
#192.168.0.154 ansible_become_method=su
#192.168.0.154 ansible_become_method=sudo
#192.168.0.154 ansible_become_method=su ansible_become_password=XXXXXXXX
#192.168.0.154 ansible_become_method=sudo ansible_become_password=XXXXXXXX
```
### command line
When `-vvvv` is specified, it hangs.
```
$ ansible-playbook -i inventory playbook.yml -k -K -vvvv
```
On the other hand, with `-vvv` it completes successfully.
```
$ ansible-playbook -i inventory playbook.yml -k -K -vvv
```
### Expected Results
It should show the same behavior as with `-vvv` below.
```
$ ansible-playbook -i inventory playbook.yml -k -K -vvv
ansible-playbook [core 2.12.0]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/sugimura/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
ansible collection location = /home/sugimura/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible-playbook
python version = 3.8.8 (default, Aug 11 2021, 06:52:42) [GCC 8.5.0 20210514 (Red Hat 8.5.0-3)]
jinja version = 2.10.3
libyaml = True
Using /etc/ansible/ansible.cfg as config file
SSH password:
BECOME password[defaults to SSH password]:
...
Escalation succeeded
<192.168.0.155> (0, b'\r\nroot\r\n', b'Shared connection to 192.168.0.155 closed.\r\n')
reboot: system successfully rebooted
<192.168.0.155> ESTABLISH SSH CONNECTION FOR USER: None
<192.168.0.155> SSH: EXEC sshpass -d10 ssh -C -o ControlMaster=auto -o ControlPersist=60s -o ConnectTimeout=10 -o ControlPath=/home/sugimura/.ansible/cp/23220a1dab 192.168.0.155 '/bin/sh -c '"'"'rm -f -r /home/sugimura/.ansible/tmp/ansible-tmp-1641436348.6996555-196425-83290895724414/ > /dev/null 2>&1 && sleep 0'"'"''
<192.168.0.155> (0, b'', b'')
changed: [192.168.0.155] => {
"changed": true,
"elapsed": 30,
"rebooted": true
}
META: ran handlers
META: ran handlers
PLAY RECAP *******************************************************************************************************************************************************************
192.168.0.155 : ok=1 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Actual Results
It hangs.
```console
$ ansible-playbook -i inventory playbook.yml -k -K -vvvv
ansible-playbook [core 2.12.0]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/sugimura/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
ansible collection location = /home/sugimura/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible-playbook
python version = 3.8.8 (default, Aug 11 2021, 06:52:42) [GCC 8.5.0 20210514 (Red Hat 8.5.0-3)]
jinja version = 2.10.3
libyaml = True
Using /etc/ansible/ansible.cfg as config file
SSH password:
BECOME password[defaults to SSH password]:
setting up inventory plugins
...
reboot: rebooting server...
<192.168.0.155> ESTABLISH SSH CONNECTION FOR USER: None
<192.168.0.155> SSH: EXEC sshpass -d10 ssh -vvv -C -o ControlMaster=auto -o ControlPersist=60s -o ConnectTimeout=10 -o ControlPath=/home/sugimura/.ansible/cp/23220a1dab -tt 192.168.0.155 '/bin/sh -c '"'"'su root -c '"'"'"'"'"'"'"'"'/bin/sh -c '"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-vqowuwkomqakrqejoekqdcsybawubrge ; /sbin/shutdown -r 0 "Reboot initiated by Ansible"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"''"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
<192.168.0.155> (0, b"\r\nShutdown scheduled for Thu 2022-01-06 11:35:37 JST, use 'shutdown -c' to cancel.\r\n", b"OpenSSH_8.0p1, OpenSSL 1.1.1k FIPS 25 Mar 2021\r\ndebug1: Reading configuration data /etc/ssh/ssh_config\r\ndebug3: /etc/ssh/ssh_config line 52: Including file /etc/ssh/ssh_config.d/05-redhat.conf depth 0\r\ndebug1: Reading configuration data /etc/ssh/ssh_config.d/05-redhat.conf\r\ndebug2: checking match for 'final all' host 192.168.0.155 originally 192.168.0.155\r\ndebug3: /etc/ssh/ssh_config.d/05-redhat.conf line 3: not matched 'final'\r\ndebug2: match not found\r\ndebug3: /etc/ssh/ssh_config.d/05-redhat.conf line 5: Including file /etc/crypto-policies/back-ends/openssh.config depth 1 (parse only)\r\ndebug1: Reading configuration data /etc/crypto-policies/back-ends/openssh.config\r\ndebug3: gss kex names ok: [gss-curve25519-sha256-,gss-nistp256-sha256-,gss-group14-sha256-,gss-group16-sha512-,gss-gex-sha1-,gss-group14-sha1-]\r\ndebug3: kex names ok: [curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group14-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha1]\r\ndebug1: configuration requests final Match pass\r\ndebug2: resolve_canonicalize: hostname 192.168.0.155 is address\r\ndebug1: re-parsing configuration\r\ndebug1: Reading configuration data /etc/ssh/ssh_config\r\ndebug3: /etc/ssh/ssh_config line 52: Including file /etc/ssh/ssh_config.d/05-redhat.conf depth 0\r\ndebug1: Reading configuration data /etc/ssh/ssh_config.d/05-redhat.conf\r\ndebug2: checking match for 'final all' host 192.168.0.155 originally 192.168.0.155\r\ndebug3: /etc/ssh/ssh_config.d/05-redhat.conf line 3: matched 'final'\r\ndebug2: match found\r\ndebug3: /etc/ssh/ssh_config.d/05-redhat.conf line 5: Including file /etc/crypto-policies/back-ends/openssh.config depth 1\r\ndebug1: Reading configuration data /etc/crypto-policies/back-ends/openssh.config\r\ndebug3: gss kex names ok: [gss-curve25519-sha256-,gss-nistp256-sha256-,gss-group14-sha256-,gss-group16-sha512-,gss-gex-sha1-,gss-group14-sha1-]\r\ndebug3: kex names ok: [curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group14-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha1]\r\ndebug1: auto-mux: Trying existing master\r\ndebug2: fd 4 setting O_NONBLOCK\r\ndebug2: mux_client_hello_exchange: master version 4\r\ndebug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r\ndebug3: mux_client_request_session: entering\r\ndebug3: mux_client_request_alive: entering\r\ndebug3: mux_client_request_alive: done pid = 196492\r\ndebug3: mux_client_request_session: session request sent\r\ndebug3: mux_client_read_packet: read header failed: Broken pipe\r\ndebug2: Received exit status from master 0\r\nShared connection to 192.168.0.155 closed.\r\n")
reboot: waiting an additional 30 seconds
reboot: validating reboot
reboot: attempting to get system boot time
<192.168.0.155> ESTABLISH SSH CONNECTION FOR USER: None
<192.168.0.155> SSH: EXEC sshpass -d10 ssh -vvv -C -o ControlMaster=auto -o ControlPersist=60s -o ConnectTimeout=10 -o ControlPath=/home/sugimura/.ansible/cp/23220a1dab -tt 192.168.0.155 '/bin/sh -c '"'"'su root -c '"'"'"'"'"'"'"'"'/bin/sh -c '"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-fwhdcdmwvpurpzythdrmsmpewqactjox ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"''"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
```
And here is the output with `ANSIBLE_DEBUG=1`.
```
$ ANSIBLE_DEBUG=1 ansible-playbook -i inventory playbook.yml -k -K -vvvv
196531 1641436626.42119: starting run
ansible-playbook [core 2.12.0]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/sugimura/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
ansible collection location = /home/sugimura/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible-playbook
python version = 3.8.8 (default, Aug 11 2021, 06:52:42) [GCC 8.5.0 20210514 (Red Hat 8.5.0-3)]
jinja version = 2.10.3
libyaml = True
Using /etc/ansible/ansible.cfg as config file
SSH password:
BECOME password[defaults to SSH password]:
196531 1641436629.13508: Added group all to inventory
196531 1641436629.13525: Added group ungrouped to inventory
196531 1641436629.13535: Group all now contains ungrouped
...
196536 1641436660.99666: become_success: (source=stderr, state=awaiting_prompt): 'debug2: mux_master_process_new_session: channel 1: request tty 1, X 0, agent 0, subsys 0, term "xterm-256color", cmd "/bin/sh -c 'su root -c '"'"'/bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-ozdcnvzirtcdftqbszityhfgcukzjzoe ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"''"'"' && sleep 0'", env 1'
Escalation succeeded
196536 1641436661.18745: stderr chunk (state=3):
>>>debug3: receive packet: type 80
debug1: client_input_global_request: rtype [email protected] want_reply 0
<<<
196536 1641436661.22876: stderr chunk (state=3):
>>>debug3: receive packet: type 91
debug2: channel_input_open_confirmation: channel 2: callback start
debug2: client_session2_setup: id 2
debug2: channel 2: request pty-req confirm 1
debug3: send packet: type 98
<<<
196536 1641436661.22899: stderr chunk (state=3):
>>>debug1: Sending environment.
debug1: Sending env LANG = ja_JP.UTF-8
debug2: channel 2: request env confirm 0
debug3: send packet: type 98
debug1: Sending command: /bin/sh -c 'su root -c '"'"'/bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-ozdcnvzirtcdftqbszityhfgcukzjzoe ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"''"'"' && sleep 0'
debug2: channel 2: request exec confirm 1
debug3: send packet: type 98
debug3: mux_session_confirm: sending success reply
debug2: channel_input_open_confirmation: channel 2: callback done
debug2: channel 2: open confirm rwindow 0 rmax 32768
<<<
196536 1641436661.23997: stderr chunk (state=3):
>>>debug3: receive packet: type 99
debug2: channel_input_status_confirm: type 99 id 2
debug2: PTY allocation request accepted on channel 2
debug2: channel 2: rcvd adjust 2097152
debug3: receive packet: type 99
debug2: channel_input_status_confirm: type 99 id 2
debug2: exec request accepted on channel 2
<<<
196536 1641436661.61510: stdout chunk (state=3):
>>>パスワード:<<<
```
The `パスワード:` means `Password:` in Japanese. I tested with `LANG=C`; the behavior is the same.
```
...
debug1: Sending command: /bin/sh -c 'su root -c '"'"'/bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-xmjdcyhcajgoprxvrmtkyqdyufnzfdeo ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"''"'"' && sleep 0'
debug2: channel 2: request exec confirm 1
debug3: send packet: type 98
<<<
196581 1641436784.82305: stderr chunk (state=3):
>>>debug3: mux_session_confirm: sending success reply
debug2: channel_input_open_confirmation: channel 2: callback done
debug2: channel 2: open confirm rwindow 0 rmax 32768
<<<
196581 1641436784.83332: stderr chunk (state=3):
>>>debug3: receive packet: type 99
debug2: channel_input_status_confirm: type 99 id 2
debug2: PTY allocation request accepted on channel 2
debug2: channel 2: rcvd adjust 2097152
debug3: receive packet: type 99
debug2: channel_input_status_confirm: type 99 id 2
debug2: exec request accepted on channel 2
<<<
196581 1641436785.19981: stdout chunk (state=3):
>>>Password: <<<
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76663
|
https://github.com/ansible/ansible/pull/76732
|
9142be2f6cabbe6597c9254c5bb9186d17036d55
|
0ff80a15ba40c2aff3b96c1152f19c97a92d3c97
| 2022-01-06T02:40:31Z |
python
| 2022-01-13T21:28:09Z |
changelogs/fragments/ssh_debug_noparse.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,663 |
reboot module hangs when the verbosity is set to connection debug (-vvvv)
|
### Summary
Even though reboot module performs as expected with the versobity 3 (-vvv), when I set it to 4 (-vvvv) it hangs. I tested 2.9.16, 2.9.27 and 2.12.0.
I specified the `become_method=su`. When `become_method=sudo`, it timed out after 5 minutes and the job proceeds as expected.
### Issue Type
Bug Report
### Component Name
reboot
### Ansible Version
```console
### RHEL 8.2
$ ansible --version
ansible 2.9.16
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/sugimura/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.6/site-packages/ansible
executable location = /usr/bin/ansible
python version = 3.6.8 (default, Dec 5 2019, 15:45:45) [GCC 8.3.1 20191121 (Red Hat 8.3.1-5)]
### RHEL 8.5
$ ansible --version
ansible [core 2.12.0]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/sugimura/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
ansible collection location = /home/sugimura/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.8.8 (default, Aug 11 2021, 06:52:42) [GCC 8.5.0 20210514 (Red Hat 8.5.0-3)]
jinja version = 2.10.3
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
$
(nothing special)
```
### OS / Environment
RHEL 8.2, 8.5 as a control node. Managed nodes are also RHEL 8.2 and 8.5 that I tested.
### Steps to Reproduce
### playbook.yml
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
---
- hosts: all
  gather_facts: false
  become: true
  tasks:
    - name: reboot
      reboot:
        post_reboot_delay: 30
```
### inventory
```
$ cat inventory
### RHEL 8.2
192.168.0.155 ansible_become_method=su
#192.168.0.155 ansible_become_method=sudo
### RHEL 8.5
#192.168.0.154 ansible_become_method=su
#192.168.0.154 ansible_become_method=sudo
#192.168.0.154 ansible_become_method=su ansible_become_password=XXXXXXXX
#192.168.0.154 ansible_become_method=sudo ansible_become_password=XXXXXXXX
```
### command line
When `-vvvv` is specified, it hangs.
```
$ ansible-playbook -i inventory playbook.yml -k -K -vvvv
```
With `-vvv`, on the other hand, it completes successfully.
```
$ ansible-playbook -i inventory playbook.yml -k -K -vvv
```
### Expected Results
It should show the same behavior as `-vvv` below.
```
$ ansible-playbook -i inventory playbook.yml -k -K -vvv
ansible-playbook [core 2.12.0]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/sugimura/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
ansible collection location = /home/sugimura/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible-playbook
python version = 3.8.8 (default, Aug 11 2021, 06:52:42) [GCC 8.5.0 20210514 (Red Hat 8.5.0-3)]
jinja version = 2.10.3
libyaml = True
Using /etc/ansible/ansible.cfg as config file
SSH password:
BECOME password[defaults to SSH password]:
...
Escalation succeeded
<192.168.0.155> (0, b'\r\nroot\r\n', b'Shared connection to 192.168.0.155 closed.\r\n')
reboot: system successfully rebooted
<192.168.0.155> ESTABLISH SSH CONNECTION FOR USER: None
<192.168.0.155> SSH: EXEC sshpass -d10 ssh -C -o ControlMaster=auto -o ControlPersist=60s -o ConnectTimeout=10 -o ControlPath=/home/sugimura/.ansible/cp/23220a1dab 192.168.0.155 '/bin/sh -c '"'"'rm -f -r /home/sugimura/.ansible/tmp/ansible-tmp-1641436348.6996555-196425-83290895724414/ > /dev/null 2>&1 && sleep 0'"'"''
<192.168.0.155> (0, b'', b'')
changed: [192.168.0.155] => {
"changed": true,
"elapsed": 30,
"rebooted": true
}
META: ran handlers
META: ran handlers
PLAY RECAP *******************************************************************************************************************************************************************
192.168.0.155 : ok=1 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Actual Results
It hangs.
```console
$ ansible-playbook -i inventory playbook.yml -k -K -vvvv
ansible-playbook [core 2.12.0]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/sugimura/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
ansible collection location = /home/sugimura/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible-playbook
python version = 3.8.8 (default, Aug 11 2021, 06:52:42) [GCC 8.5.0 20210514 (Red Hat 8.5.0-3)]
jinja version = 2.10.3
libyaml = True
Using /etc/ansible/ansible.cfg as config file
SSH password:
BECOME password[defaults to SSH password]:
setting up inventory plugins
...
reboot: rebooting server...
<192.168.0.155> ESTABLISH SSH CONNECTION FOR USER: None
<192.168.0.155> SSH: EXEC sshpass -d10 ssh -vvv -C -o ControlMaster=auto -o ControlPersist=60s -o ConnectTimeout=10 -o ControlPath=/home/sugimura/.ansible/cp/23220a1dab -tt 192.168.0.155 '/bin/sh -c '"'"'su root -c '"'"'"'"'"'"'"'"'/bin/sh -c '"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-vqowuwkomqakrqejoekqdcsybawubrge ; /sbin/shutdown -r 0 "Reboot initiated by Ansible"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"''"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
<192.168.0.155> (0, b"\r\nShutdown scheduled for Thu 2022-01-06 11:35:37 JST, use 'shutdown -c' to cancel.\r\n", b"OpenSSH_8.0p1, OpenSSL 1.1.1k FIPS 25 Mar 2021\r\ndebug1: Reading configuration data /etc/ssh/ssh_config\r\ndebug3: /etc/ssh/ssh_config line 52: Including file /etc/ssh/ssh_config.d/05-redhat.conf depth 0\r\ndebug1: Reading configuration data /etc/ssh/ssh_config.d/05-redhat.conf\r\ndebug2: checking match for 'final all' host 192.168.0.155 originally 192.168.0.155\r\ndebug3: /etc/ssh/ssh_config.d/05-redhat.conf line 3: not matched 'final'\r\ndebug2: match not found\r\ndebug3: /etc/ssh/ssh_config.d/05-redhat.conf line 5: Including file /etc/crypto-policies/back-ends/openssh.config depth 1 (parse only)\r\ndebug1: Reading configuration data /etc/crypto-policies/back-ends/openssh.config\r\ndebug3: gss kex names ok: [gss-curve25519-sha256-,gss-nistp256-sha256-,gss-group14-sha256-,gss-group16-sha512-,gss-gex-sha1-,gss-group14-sha1-]\r\ndebug3: kex names ok: [curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group14-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha1]\r\ndebug1: configuration requests final Match pass\r\ndebug2: resolve_canonicalize: hostname 192.168.0.155 is address\r\ndebug1: re-parsing configuration\r\ndebug1: Reading configuration data /etc/ssh/ssh_config\r\ndebug3: /etc/ssh/ssh_config line 52: Including file /etc/ssh/ssh_config.d/05-redhat.conf depth 0\r\ndebug1: Reading configuration data /etc/ssh/ssh_config.d/05-redhat.conf\r\ndebug2: checking match for 'final all' host 192.168.0.155 originally 192.168.0.155\r\ndebug3: /etc/ssh/ssh_config.d/05-redhat.conf line 3: matched 'final'\r\ndebug2: match found\r\ndebug3: /etc/ssh/ssh_config.d/05-redhat.conf line 5: Including file /etc/crypto-policies/back-ends/openssh.config depth 1\r\ndebug1: Reading configuration data /etc/crypto-policies/back-ends/openssh.config\r\ndebug3: gss kex names ok: [gss-curve25519-sha256-,gss-nistp256-sha256-,gss-group14-sha256-,gss-group16-sha512-,gss-gex-sha1-,gss-group14-sha1-]\r\ndebug3: kex names ok: [curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group14-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha1]\r\ndebug1: auto-mux: Trying existing master\r\ndebug2: fd 4 setting O_NONBLOCK\r\ndebug2: mux_client_hello_exchange: master version 4\r\ndebug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r\ndebug3: mux_client_request_session: entering\r\ndebug3: mux_client_request_alive: entering\r\ndebug3: mux_client_request_alive: done pid = 196492\r\ndebug3: mux_client_request_session: session request sent\r\ndebug3: mux_client_read_packet: read header failed: Broken pipe\r\ndebug2: Received exit status from master 0\r\nShared connection to 192.168.0.155 closed.\r\n")
reboot: waiting an additional 30 seconds
reboot: validating reboot
reboot: attempting to get system boot time
<192.168.0.155> ESTABLISH SSH CONNECTION FOR USER: None
<192.168.0.155> SSH: EXEC sshpass -d10 ssh -vvv -C -o ControlMaster=auto -o ControlPersist=60s -o ConnectTimeout=10 -o ControlPath=/home/sugimura/.ansible/cp/23220a1dab -tt 192.168.0.155 '/bin/sh -c '"'"'su root -c '"'"'"'"'"'"'"'"'/bin/sh -c '"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-fwhdcdmwvpurpzythdrmsmpewqactjox ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"''"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
```
And here is the output with `ANSIBLE_DEBUG=1`.
```
$ ANSIBLE_DEBUG=1 ansible-playbook -i inventory playbook.yml -k -K -vvvv
196531 1641436626.42119: starting run
ansible-playbook [core 2.12.0]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/sugimura/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
ansible collection location = /home/sugimura/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible-playbook
python version = 3.8.8 (default, Aug 11 2021, 06:52:42) [GCC 8.5.0 20210514 (Red Hat 8.5.0-3)]
jinja version = 2.10.3
libyaml = True
Using /etc/ansible/ansible.cfg as config file
SSH password:
BECOME password[defaults to SSH password]:
196531 1641436629.13508: Added group all to inventory
196531 1641436629.13525: Added group ungrouped to inventory
196531 1641436629.13535: Group all now contains ungrouped
...
196536 1641436660.99666: become_success: (source=stderr, state=awaiting_prompt): 'debug2: mux_master_process_new_session: channel 1: request tty 1, X 0, agent 0, subsys 0, term "xterm-256color", cmd "/bin/sh -c 'su root -c '"'"'/bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-ozdcnvzirtcdftqbszityhfgcukzjzoe ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"''"'"' && sleep 0'", env 1'
Escalation succeeded
196536 1641436661.18745: stderr chunk (state=3):
>>>debug3: receive packet: type 80
debug1: client_input_global_request: rtype [email protected] want_reply 0
<<<
196536 1641436661.22876: stderr chunk (state=3):
>>>debug3: receive packet: type 91
debug2: channel_input_open_confirmation: channel 2: callback start
debug2: client_session2_setup: id 2
debug2: channel 2: request pty-req confirm 1
debug3: send packet: type 98
<<<
196536 1641436661.22899: stderr chunk (state=3):
>>>debug1: Sending environment.
debug1: Sending env LANG = ja_JP.UTF-8
debug2: channel 2: request env confirm 0
debug3: send packet: type 98
debug1: Sending command: /bin/sh -c 'su root -c '"'"'/bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-ozdcnvzirtcdftqbszityhfgcukzjzoe ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"''"'"' && sleep 0'
debug2: channel 2: request exec confirm 1
debug3: send packet: type 98
debug3: mux_session_confirm: sending success reply
debug2: channel_input_open_confirmation: channel 2: callback done
debug2: channel 2: open confirm rwindow 0 rmax 32768
<<<
196536 1641436661.23997: stderr chunk (state=3):
>>>debug3: receive packet: type 99
debug2: channel_input_status_confirm: type 99 id 2
debug2: PTY allocation request accepted on channel 2
debug2: channel 2: rcvd adjust 2097152
debug3: receive packet: type 99
debug2: channel_input_status_confirm: type 99 id 2
debug2: exec request accepted on channel 2
<<<
196536 1641436661.61510: stdout chunk (state=3):
>>>パスワード:<<<
```
The `パスワード:` means `Password:` in Japanese. I also tested with `LANG=C`; the behavior is the same.
```
...
debug1: Sending command: /bin/sh -c 'su root -c '"'"'/bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-xmjdcyhcajgoprxvrmtkyqdyufnzfdeo ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"''"'"' && sleep 0'
debug2: channel 2: request exec confirm 1
debug3: send packet: type 98
<<<
196581 1641436784.82305: stderr chunk (state=3):
>>>debug3: mux_session_confirm: sending success reply
debug2: channel_input_open_confirmation: channel 2: callback done
debug2: channel 2: open confirm rwindow 0 rmax 32768
<<<
196581 1641436784.83332: stderr chunk (state=3):
>>>debug3: receive packet: type 99
debug2: channel_input_status_confirm: type 99 id 2
debug2: PTY allocation request accepted on channel 2
debug2: channel 2: rcvd adjust 2097152
debug3: receive packet: type 99
debug2: channel_input_status_confirm: type 99 id 2
debug2: exec request accepted on channel 2
<<<
196581 1641436785.19981: stdout chunk (state=3):
>>>Password: <<<
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76663
|
https://github.com/ansible/ansible/pull/76732
|
9142be2f6cabbe6597c9254c5bb9186d17036d55
|
0ff80a15ba40c2aff3b96c1152f19c97a92d3c97
| 2022-01-06T02:40:31Z |
python
| 2022-01-13T21:28:09Z |
lib/ansible/plugins/connection/ssh.py
|
# Copyright (c) 2012, Michael DeHaan <[email protected]>
# Copyright 2015 Abhijit Menon-Sen <[email protected]>
# Copyright 2017 Toshio Kuratomi <[email protected]>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: ssh
short_description: connect via SSH client binary
description:
- This connection plugin allows Ansible to communicate to the target machines through normal SSH command line.
- Ansible does not expose a channel to allow communication between the user and the SSH process to accept
a password manually to decrypt an SSH key when using this connection plugin (which is the default). The
use of C(ssh-agent) is highly recommended.
author: ansible (@core)
extends_documentation_fragment:
- connection_pipelining
version_added: historical
notes:
- Many options default to C(None) here but that only means we do not override the SSH tool's defaults and/or configuration.
For example, if you specify the port in this plugin it will override any C(Port) entry in your C(.ssh/config).
options:
host:
description: Hostname/IP to connect to.
vars:
- name: inventory_hostname
- name: ansible_host
- name: ansible_ssh_host
- name: delegated_vars['ansible_host']
- name: delegated_vars['ansible_ssh_host']
host_key_checking:
description: Determines if SSH should check host keys.
default: True
type: boolean
ini:
- section: defaults
key: 'host_key_checking'
- section: ssh_connection
key: 'host_key_checking'
version_added: '2.5'
env:
- name: ANSIBLE_HOST_KEY_CHECKING
- name: ANSIBLE_SSH_HOST_KEY_CHECKING
version_added: '2.5'
vars:
- name: ansible_host_key_checking
version_added: '2.5'
- name: ansible_ssh_host_key_checking
version_added: '2.5'
password:
description: Authentication password for the C(remote_user). Can be supplied as CLI option.
vars:
- name: ansible_password
- name: ansible_ssh_pass
- name: ansible_ssh_password
sshpass_prompt:
description:
- Password prompt that sshpass should search for. Supported by sshpass 1.06 and up.
- Defaults to C(Enter PIN for) when pkcs11_provider is set.
default: ''
ini:
- section: 'ssh_connection'
key: 'sshpass_prompt'
env:
- name: ANSIBLE_SSHPASS_PROMPT
vars:
- name: ansible_sshpass_prompt
version_added: '2.10'
ssh_args:
description: Arguments to pass to all SSH CLI tools.
default: '-C -o ControlMaster=auto -o ControlPersist=60s'
ini:
- section: 'ssh_connection'
key: 'ssh_args'
env:
- name: ANSIBLE_SSH_ARGS
vars:
- name: ansible_ssh_args
version_added: '2.7'
ssh_common_args:
description: Common extra args for all SSH CLI tools.
ini:
- section: 'ssh_connection'
key: 'ssh_common_args'
version_added: '2.7'
env:
- name: ANSIBLE_SSH_COMMON_ARGS
version_added: '2.7'
vars:
- name: ansible_ssh_common_args
cli:
- name: ssh_common_args
default: ''
ssh_executable:
default: ssh
description:
- This defines the location of the SSH binary. It defaults to C(ssh) which will use the first SSH binary available in $PATH.
        - This option is usually not required; it might be useful when access to system SSH is restricted,
or when using SSH wrappers to connect to remote hosts.
env: [{name: ANSIBLE_SSH_EXECUTABLE}]
ini:
- {key: ssh_executable, section: ssh_connection}
#const: ANSIBLE_SSH_EXECUTABLE
version_added: "2.2"
vars:
- name: ansible_ssh_executable
version_added: '2.7'
sftp_executable:
default: sftp
description:
- This defines the location of the sftp binary. It defaults to C(sftp) which will use the first binary available in $PATH.
env: [{name: ANSIBLE_SFTP_EXECUTABLE}]
ini:
- {key: sftp_executable, section: ssh_connection}
version_added: "2.6"
vars:
- name: ansible_sftp_executable
version_added: '2.7'
scp_executable:
default: scp
description:
- This defines the location of the scp binary. It defaults to C(scp) which will use the first binary available in $PATH.
env: [{name: ANSIBLE_SCP_EXECUTABLE}]
ini:
- {key: scp_executable, section: ssh_connection}
version_added: "2.6"
vars:
- name: ansible_scp_executable
version_added: '2.7'
scp_extra_args:
      description: Extra args exclusive to the C(scp) CLI
vars:
- name: ansible_scp_extra_args
env:
- name: ANSIBLE_SCP_EXTRA_ARGS
version_added: '2.7'
ini:
- key: scp_extra_args
section: ssh_connection
version_added: '2.7'
cli:
- name: scp_extra_args
default: ''
sftp_extra_args:
      description: Extra args exclusive to the C(sftp) CLI
vars:
- name: ansible_sftp_extra_args
env:
- name: ANSIBLE_SFTP_EXTRA_ARGS
version_added: '2.7'
ini:
- key: sftp_extra_args
section: ssh_connection
version_added: '2.7'
cli:
- name: sftp_extra_args
default: ''
ssh_extra_args:
      description: Extra args exclusive to the SSH CLI.
vars:
- name: ansible_ssh_extra_args
env:
- name: ANSIBLE_SSH_EXTRA_ARGS
version_added: '2.7'
ini:
- key: ssh_extra_args
section: ssh_connection
version_added: '2.7'
cli:
- name: ssh_extra_args
default: ''
reconnection_retries:
description: Number of attempts to connect.
default: 0
type: integer
env:
- name: ANSIBLE_SSH_RETRIES
ini:
- section: connection
key: retries
- section: ssh_connection
key: retries
vars:
- name: ansible_ssh_retries
version_added: '2.7'
port:
description: Remote port to connect to.
type: int
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_port
- name: ansible_ssh_port
remote_user:
description:
- User name with which to login to the remote server, normally set by the remote_user keyword.
          - If no user is supplied, Ansible will let the SSH client binary choose the user as it normally would.
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
- name: ansible_ssh_user
cli:
- name: user
pipelining:
env:
- name: ANSIBLE_PIPELINING
- name: ANSIBLE_SSH_PIPELINING
ini:
- section: defaults
key: pipelining
- section: connection
key: pipelining
- section: ssh_connection
key: pipelining
vars:
- name: ansible_pipelining
- name: ansible_ssh_pipelining
private_key_file:
description:
- Path to private key file to use for authentication.
ini:
- section: defaults
key: private_key_file
env:
- name: ANSIBLE_PRIVATE_KEY_FILE
vars:
- name: ansible_private_key_file
- name: ansible_ssh_private_key_file
cli:
- name: private_key_file
option: '--private-key'
control_path:
description:
- This is the location to save SSH's ControlPath sockets, it uses SSH's variable substitution.
- Since 2.3, if null (default), ansible will generate a unique hash. Use ``%(directory)s`` to indicate where to use the control dir path setting.
- Before 2.3 it defaulted to ``control_path=%(directory)s/ansible-ssh-%%h-%%p-%%r``.
- Be aware that this setting is ignored if C(-o ControlPath) is set in ssh args.
env:
- name: ANSIBLE_SSH_CONTROL_PATH
ini:
- key: control_path
section: ssh_connection
vars:
- name: ansible_control_path
version_added: '2.7'
control_path_dir:
default: ~/.ansible/cp
description:
- This sets the directory to use for ssh control path if the control path setting is null.
- Also, provides the ``%(directory)s`` variable for the control path setting.
env:
- name: ANSIBLE_SSH_CONTROL_PATH_DIR
ini:
- section: ssh_connection
key: control_path_dir
vars:
- name: ansible_control_path_dir
version_added: '2.7'
sftp_batch_mode:
default: 'yes'
      description: Determines if sftp batch mode is used; batch mode allows failed transfers to be detected correctly.
env: [{name: ANSIBLE_SFTP_BATCH_MODE}]
ini:
- {key: sftp_batch_mode, section: ssh_connection}
type: bool
vars:
- name: ansible_sftp_batch_mode
version_added: '2.7'
ssh_transfer_method:
description:
- "Preferred method to use when transferring files over ssh"
- Setting to 'smart' (default) will try them in order, until one succeeds or they all fail
- Using 'piped' creates an ssh pipe with C(dd) on either side to copy the data
choices: ['sftp', 'scp', 'piped', 'smart']
env: [{name: ANSIBLE_SSH_TRANSFER_METHOD}]
ini:
- {key: transfer_method, section: ssh_connection}
vars:
- name: ansible_ssh_transfer_method
version_added: '2.12'
scp_if_ssh:
deprecated:
why: In favor of the "ssh_transfer_method" option.
version: "2.17"
alternatives: ssh_transfer_method
default: smart
description:
- "Preferred method to use when transfering files over SSH."
- When set to I(smart), Ansible will try them until one succeeds or they all fail.
- If set to I(True), it will force 'scp', if I(False) it will use 'sftp'.
- This setting will overridden by ssh_transfer_method if set.
env: [{name: ANSIBLE_SCP_IF_SSH}]
ini:
- {key: scp_if_ssh, section: ssh_connection}
vars:
- name: ansible_scp_if_ssh
version_added: '2.7'
use_tty:
version_added: '2.5'
default: 'yes'
description: add -tt to ssh commands to force tty allocation.
env: [{name: ANSIBLE_SSH_USETTY}]
ini:
- {key: usetty, section: ssh_connection}
type: bool
vars:
- name: ansible_ssh_use_tty
version_added: '2.7'
timeout:
default: 10
description:
          - This is the default amount of time we will wait while establishing an SSH connection.
          - It also controls how long we can wait before attempting to read from the connection once it is established (select on the socket).
env:
- name: ANSIBLE_TIMEOUT
- name: ANSIBLE_SSH_TIMEOUT
version_added: '2.11'
ini:
- key: timeout
section: defaults
- key: timeout
section: ssh_connection
version_added: '2.11'
vars:
- name: ansible_ssh_timeout
version_added: '2.11'
cli:
- name: timeout
type: integer
pkcs11_provider:
version_added: '2.12'
default: ""
description:
- "PKCS11 SmartCard provider such as opensc, example: /usr/local/lib/opensc-pkcs11.so"
- Requires sshpass version 1.06+, sshpass must support the -P option.
env: [{name: ANSIBLE_PKCS11_PROVIDER}]
ini:
- {key: pkcs11_provider, section: ssh_connection}
vars:
- name: ansible_ssh_pkcs11_provider
'''
import errno
import fcntl
import hashlib
import os
import pty
import re
import shlex
import subprocess
import time
from functools import wraps
from ansible.errors import (
AnsibleAuthenticationFailure,
AnsibleConnectionFailure,
AnsibleError,
AnsibleFileNotFound,
)
from ansible.errors import AnsibleOptionsError
from ansible.module_utils.compat import selectors
from ansible.module_utils.six import PY3, text_type, binary_type
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.parsing.convert_bool import BOOLEANS, boolean
from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.plugins.shell.powershell import _parse_clixml
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath, makedirs_safe
display = Display()
b_NOT_SSH_ERRORS = (b'Traceback (most recent call last):', # Python-2.6 when there's an exception
# while invoking a script via -m
b'PHP Parse error:', # Php always returns error 255
)
SSHPASS_AVAILABLE = None
class AnsibleControlPersistBrokenPipeError(AnsibleError):
''' ControlPersist broken pipe '''
pass
def _handle_error(remaining_retries, command, return_tuple, no_log, host, display=display):
# sshpass errors
if command == b'sshpass':
# Error 5 is invalid/incorrect password. Raise an exception to prevent retries from locking the account.
if return_tuple[0] == 5:
msg = 'Invalid/incorrect username/password. Skipping remaining {0} retries to prevent account lockout:'.format(remaining_retries)
if remaining_retries <= 0:
msg = 'Invalid/incorrect password:'
if no_log:
msg = '{0} <error censored due to no log>'.format(msg)
else:
msg = '{0} {1}'.format(msg, to_native(return_tuple[2]).rstrip())
raise AnsibleAuthenticationFailure(msg)
    # sshpass return codes are 1-6. We handled 5 above, so this catches the other scenarios.
    # No exception is raised, so the connection is retried - except when attempting to use
    # sshpass_prompt with an sshpass that won't let us pass -P, in which case we fail loudly.
elif return_tuple[0] in [1, 2, 3, 4, 6]:
msg = 'sshpass error:'
if no_log:
msg = '{0} <error censored due to no log>'.format(msg)
else:
details = to_native(return_tuple[2]).rstrip()
if "sshpass: invalid option -- 'P'" in details:
details = 'Installed sshpass version does not support customized password prompts. ' \
'Upgrade sshpass to use sshpass_prompt, or otherwise switch to ssh keys.'
raise AnsibleError('{0} {1}'.format(msg, details))
msg = '{0} {1}'.format(msg, details)
if return_tuple[0] == 255:
SSH_ERROR = True
for signature in b_NOT_SSH_ERRORS:
if signature in return_tuple[1]:
SSH_ERROR = False
break
if SSH_ERROR:
msg = "Failed to connect to the host via ssh:"
if no_log:
msg = '{0} <error censored due to no log>'.format(msg)
else:
msg = '{0} {1}'.format(msg, to_native(return_tuple[2]).rstrip())
raise AnsibleConnectionFailure(msg)
# For other errors, no exception is raised so the connection is retried and we only log the messages
if 1 <= return_tuple[0] <= 254:
msg = u"Failed to connect to the host via ssh:"
if no_log:
msg = u'{0} <error censored due to no log>'.format(msg)
else:
msg = u'{0} {1}'.format(msg, to_text(return_tuple[2]).rstrip())
display.vvv(msg, host=host)
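# A minimal sketch of how _handle_error classifies results (the argument values
# below are hypothetical, chosen only to illustrate each branch):
#
#   _handle_error(2, b'sshpass', (5, b'', b'Permission denied'), False, 'host1')
#       -> raises AnsibleAuthenticationFailure (sshpass exit 5 == bad password; no retry)
#   _handle_error(2, b'ssh', (255, b'', b'Connection refused'), False, 'host1')
#       -> raises AnsibleConnectionFailure (255 == ssh-level failure; may be retried)
#   _handle_error(2, b'ssh', (1, b'remote out', b'remote err'), False, 'host1')
#       -> only logs; return codes 1-254 are treated as remote command return codes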
def _ssh_retry(func):
"""
Decorator to retry ssh/scp/sftp in the case of a connection failure
Will retry if:
* an exception is caught
* ssh returns 255
Will not retry if
* sshpass returns 5 (invalid password, to prevent account lockouts)
* remaining_tries is < 2
* retries limit reached
"""
@wraps(func)
def wrapped(self, *args, **kwargs):
remaining_tries = int(self.get_option('reconnection_retries')) + 1
cmd_summary = u"%s..." % to_text(args[0])
conn_password = self.get_option('password') or self._play_context.password
for attempt in range(remaining_tries):
cmd = args[0]
if attempt != 0 and conn_password and isinstance(cmd, list):
# If this is a retry, the fd/pipe for sshpass is closed, and we need a new one
self.sshpass_pipe = os.pipe()
cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')
try:
try:
return_tuple = func(self, *args, **kwargs)
# TODO: this should come from task
if self._play_context.no_log:
display.vvv(u'rc=%s, stdout and stderr censored due to no log' % return_tuple[0], host=self.host)
else:
display.vvv(return_tuple, host=self.host)
# 0 = success
# 1-254 = remote command return code
# 255 could be a failure from the ssh command itself
except (AnsibleControlPersistBrokenPipeError):
# Retry one more time because of the ControlPersist broken pipe (see #16731)
cmd = args[0]
if conn_password and isinstance(cmd, list):
# This is a retry, so the fd/pipe for sshpass is closed, and we need a new one
self.sshpass_pipe = os.pipe()
cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')
display.vvv(u"RETRYING BECAUSE OF CONTROLPERSIST BROKEN PIPE")
return_tuple = func(self, *args, **kwargs)
remaining_retries = remaining_tries - attempt - 1
_handle_error(remaining_retries, cmd[0], return_tuple, self._play_context.no_log, self.host)
break
# 5 = Invalid/incorrect password from sshpass
except AnsibleAuthenticationFailure:
# Raising this exception, which is subclassed from AnsibleConnectionFailure, prevents further retries
raise
except (AnsibleConnectionFailure, Exception) as e:
if attempt == remaining_tries - 1:
raise
else:
pause = 2 ** attempt - 1
if pause > 30:
pause = 30
if isinstance(e, AnsibleConnectionFailure):
msg = u"ssh_retry: attempt: %d, ssh return code is 255. cmd (%s), pausing for %d seconds" % (attempt + 1, cmd_summary, pause)
else:
msg = (u"ssh_retry: attempt: %d, caught exception(%s) from cmd (%s), "
u"pausing for %d seconds" % (attempt + 1, to_text(e), cmd_summary, pause))
display.vv(msg, host=self.host)
time.sleep(pause)
continue
return return_tuple
return wrapped
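# Illustrative backoff schedule (a sketch, not part of the plugin): the pause
# between retries is 2 ** attempt - 1 seconds, capped at 30. For example, with
# reconnection_retries=5 the pauses after failed attempts would be:
#
#   for attempt in range(5):
#       print(attempt + 1, min(2 ** attempt - 1, 30))   # -> 0, 1, 3, 7, 15 seconds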
class Connection(ConnectionBase):
''' ssh based connections '''
transport = 'ssh'
has_pipelining = True
def __init__(self, *args, **kwargs):
super(Connection, self).__init__(*args, **kwargs)
# TODO: all should come from get_option(), but not might be set at this point yet
self.host = self._play_context.remote_addr
self.port = self._play_context.port
self.user = self._play_context.remote_user
self.control_path = None
self.control_path_dir = None
# Windows operates differently from a POSIX connection/shell plugin,
# we need to set various properties to ensure SSH on Windows continues
# to work
if getattr(self._shell, "_IS_WINDOWS", False):
self.has_native_async = True
self.always_pipeline_modules = True
self.module_implementation_preferences = ('.ps1', '.exe', '')
self.allow_executable = False
# The connection is created by running ssh/scp/sftp from the exec_command,
# put_file, and fetch_file methods, so we don't need to do any connection
# management here.
def _connect(self):
return self
@staticmethod
def _create_control_path(host, port, user, connection=None, pid=None):
'''Make a hash for the controlpath based on con attributes'''
pstring = '%s-%s-%s' % (host, port, user)
if connection:
pstring += '-%s' % connection
if pid:
pstring += '-%s' % to_text(pid)
m = hashlib.sha1()
m.update(to_bytes(pstring))
digest = m.hexdigest()
cpath = '%(directory)s/' + digest[:10]
return cpath
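    # For example (illustrative; the digest value is made up):
    #   Connection._create_control_path('192.0.2.10', 22, 'ansible')
    #   -> '%(directory)s/' + sha1(b'192.0.2.10-22-ansible').hexdigest()[:10],
    #      e.g. '%(directory)s/1f2d3c4b5a'
    # The '%(directory)s' placeholder is filled in later with control_path_dir.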
@staticmethod
def _sshpass_available():
global SSHPASS_AVAILABLE
# We test once if sshpass is available, and remember the result. It
# would be nice to use distutils.spawn.find_executable for this, but
        # distutils isn't always available; shutil.which() is Python3-only.
if SSHPASS_AVAILABLE is None:
try:
p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
SSHPASS_AVAILABLE = True
except OSError:
SSHPASS_AVAILABLE = False
return SSHPASS_AVAILABLE
@staticmethod
def _persistence_controls(b_command):
'''
Takes a command array and scans it for ControlPersist and ControlPath
settings and returns two booleans indicating whether either was found.
This could be smarter, e.g. returning false if ControlPersist is 'no',
        but for now we do it the simple way.
'''
controlpersist = False
controlpath = False
for b_arg in (a.lower() for a in b_command):
if b'controlpersist' in b_arg:
controlpersist = True
elif b'controlpath' in b_arg:
controlpath = True
return controlpersist, controlpath
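    # Sketch of the expected behaviour:
    #   _persistence_controls([b'ssh', b'-o', b'ControlMaster=auto',
    #                          b'-o', b'ControlPersist=60s'])
    #   -> (True, False)   # ControlPersist found, no explicit ControlPath yet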
def _add_args(self, b_command, b_args, explanation):
"""
Adds arguments to the ssh command and displays a caller-supplied explanation of why.
:arg b_command: A list containing the command to add the new arguments to.
This list will be modified by this method.
:arg b_args: An iterable of new arguments to add. This iterable is used
more than once so it must be persistent (ie: a list is okay but a
StringIO would not)
        :arg explanation: A text string explaining why the arguments
            were added. It will be displayed at a high enough verbosity.
.. note:: This function does its work via side-effect. The b_command list has the new arguments appended.
"""
display.vvvvv(u'SSH: %s: (%s)' % (explanation, ')('.join(to_text(a) for a in b_args)), host=self.host)
b_command += b_args
def _build_command(self, binary, subsystem, *other_args):
'''
        Takes an executable (ssh, scp, sftp or wrapper) and optional extra arguments and returns the remote command
wrapped in local ssh shell commands and ready for execution.
:arg binary: actual executable to use to execute command.
:arg subsystem: type of executable provided, ssh/sftp/scp, needed because wrappers for ssh might have diff names.
        :arg other_args: any additional arguments to pass through to the ssh binary
'''
b_command = []
conn_password = self.get_option('password') or self._play_context.password
#
# First, the command to invoke
#
# If we want to use password authentication, we have to set up a pipe to
# write the password to sshpass.
pkcs11_provider = self.get_option("pkcs11_provider")
if conn_password or pkcs11_provider:
if not self._sshpass_available():
raise AnsibleError("to use the 'ssh' connection type with passwords or pkcs11_provider, you must install the sshpass program")
if not conn_password and pkcs11_provider:
raise AnsibleError("to use pkcs11_provider you must specify a password/pin")
self.sshpass_pipe = os.pipe()
b_command += [b'sshpass', b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')]
password_prompt = self.get_option('sshpass_prompt')
if not password_prompt and pkcs11_provider:
                # Set default password prompt for pkcs11_provider to make it clear it's a PIN
password_prompt = 'Enter PIN for '
if password_prompt:
b_command += [b'-P', to_bytes(password_prompt, errors='surrogate_or_strict')]
b_command += [to_bytes(binary, errors='surrogate_or_strict')]
#
# Next, additional arguments based on the configuration.
#
# pkcs11 mode allows the use of Smartcards or Yubikey devices
if conn_password and pkcs11_provider:
self._add_args(b_command,
(b"-o", b"KbdInteractiveAuthentication=no",
b"-o", b"PreferredAuthentications=publickey",
b"-o", b"PasswordAuthentication=no",
b'-o', to_bytes(u'PKCS11Provider=%s' % pkcs11_provider)),
u'Enable pkcs11')
# sftp batch mode allows us to correctly catch failed transfers, but can
# be disabled if the client side doesn't support the option. However,
# sftp batch mode does not prompt for passwords so it must be disabled
# if not using controlpersist and using sshpass
if subsystem == 'sftp' and self.get_option('sftp_batch_mode'):
if conn_password:
b_args = [b'-o', b'BatchMode=no']
self._add_args(b_command, b_args, u'disable batch mode for sshpass')
b_command += [b'-b', b'-']
if self._play_context.verbosity > 3:
b_command.append(b'-vvv')
# Next, we add ssh_args
ssh_args = self.get_option('ssh_args')
if ssh_args:
b_args = [to_bytes(a, errors='surrogate_or_strict') for a in
self._split_ssh_args(ssh_args)]
self._add_args(b_command, b_args, u"ansible.cfg set ssh_args")
# Now we add various arguments that have their own specific settings defined in docs above.
if self.get_option('host_key_checking') is False:
b_args = (b"-o", b"StrictHostKeyChecking=no")
self._add_args(b_command, b_args, u"ANSIBLE_HOST_KEY_CHECKING/host_key_checking disabled")
self.port = self.get_option('port')
if self.port is not None:
b_args = (b"-o", b"Port=" + to_bytes(self.port, nonstring='simplerepr', errors='surrogate_or_strict'))
self._add_args(b_command, b_args, u"ANSIBLE_REMOTE_PORT/remote_port/ansible_port set")
key = self.get_option('private_key_file')
if key:
b_args = (b"-o", b'IdentityFile="' + to_bytes(os.path.expanduser(key), errors='surrogate_or_strict') + b'"')
self._add_args(b_command, b_args, u"ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set")
if not conn_password:
self._add_args(
b_command, (
b"-o", b"KbdInteractiveAuthentication=no",
b"-o", b"PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
b"-o", b"PasswordAuthentication=no"
),
u"ansible_password/ansible_ssh_password not set"
)
self.user = self.get_option('remote_user')
if self.user:
self._add_args(
b_command,
(b"-o", b'User="%s"' % to_bytes(self.user, errors='surrogate_or_strict')),
u"ANSIBLE_REMOTE_USER/remote_user/ansible_user/user/-u set"
)
timeout = self.get_option('timeout')
self._add_args(
b_command,
(b"-o", b"ConnectTimeout=" + to_bytes(timeout, errors='surrogate_or_strict', nonstring='simplerepr')),
u"ANSIBLE_TIMEOUT/timeout set"
)
# Add in any common or binary-specific arguments from the PlayContext
# (i.e. inventory or task settings or overrides on the command line).
for opt in (u'ssh_common_args', u'{0}_extra_args'.format(subsystem)):
attr = self.get_option(opt)
if attr is not None:
b_args = [to_bytes(a, errors='surrogate_or_strict') for a in self._split_ssh_args(attr)]
self._add_args(b_command, b_args, u"Set %s" % opt)
# Check if ControlPersist is enabled and add a ControlPath if one hasn't
# already been set.
controlpersist, controlpath = self._persistence_controls(b_command)
if controlpersist:
self._persistent = True
if not controlpath:
self.control_path_dir = self.get_option('control_path_dir')
cpdir = unfrackpath(self.control_path_dir)
b_cpdir = to_bytes(cpdir, errors='surrogate_or_strict')
# The directory must exist and be writable.
makedirs_safe(b_cpdir, 0o700)
if not os.access(b_cpdir, os.W_OK):
raise AnsibleError("Cannot write to ControlPath %s" % to_native(cpdir))
self.control_path = self.get_option('control_path')
if not self.control_path:
self.control_path = self._create_control_path(
self.host,
self.port,
self.user
)
b_args = (b"-o", b'ControlPath="%s"' % to_bytes(self.control_path % dict(directory=cpdir), errors='surrogate_or_strict'))
self._add_args(b_command, b_args, u"found only ControlPersist; added ControlPath")
# Finally, we add any caller-supplied extras.
if other_args:
b_command += [to_bytes(a) for a in other_args]
return b_command
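    # A typical result for password authentication (illustrative only; the exact
    # flags depend on configuration, compare the -vvvv log in the issue above):
    #   [b'sshpass', b'-d12', b'ssh', b'-C', b'-o', b'ControlMaster=auto',
    #    b'-o', b'ControlPersist=60s', b'-o', b'ConnectTimeout=10',
    #    b'-o', b'ControlPath="/home/user/.ansible/cp/23220a1dab"', b'-tt',
    #    b'192.168.0.155', b"/bin/sh -c 'su root -c ...'"]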
def _send_initial_data(self, fh, in_data, ssh_process):
'''
Writes initial data to the stdin filehandle of the subprocess and closes
it. (The handle must be closed; otherwise, for example, "sftp -b -" will
just hang forever waiting for more commands.)
'''
display.debug(u'Sending initial data')
try:
fh.write(to_bytes(in_data))
fh.close()
except (OSError, IOError) as e:
# The ssh connection may have already terminated at this point, with a more useful error
# Only raise AnsibleConnectionFailure if the ssh process is still alive
time.sleep(0.001)
ssh_process.poll()
if getattr(ssh_process, 'returncode', None) is None:
raise AnsibleConnectionFailure(
'Data could not be sent to remote host "%s". Make sure this host can be reached '
'over ssh: %s' % (self.host, to_native(e)), orig_exc=e
)
display.debug(u'Sent initial data (%d bytes)' % len(in_data))
# Used by _run() to kill processes on failures
@staticmethod
def _terminate_process(p):
""" Terminate a process, ignoring errors """
try:
p.terminate()
except (OSError, IOError):
pass
# This is separate from _run() because we need to do the same thing for stdout
# and stderr.
def _examine_output(self, source, state, b_chunk, sudoable):
'''
Takes a string, extracts complete lines from it, tests to see if they
are a prompt, error message, etc., and sets appropriate flags in self.
Prompt and success lines are removed.
Returns the processed (i.e. possibly-edited) output and the unprocessed
remainder (to be processed with the next chunk) as strings.
'''
output = []
for b_line in b_chunk.splitlines(True):
display_line = to_text(b_line).rstrip('\r\n')
suppress_output = False
# display.debug("Examining line (source=%s, state=%s): '%s'" % (source, state, display_line))
if self.become.expect_prompt() and self.become.check_password_prompt(b_line):
display.debug(u"become_prompt: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_prompt'] = True
suppress_output = True
elif self.become.success and self.become.check_success(b_line):
display.debug(u"become_success: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_success'] = True
suppress_output = True
elif sudoable and self.become.check_incorrect_password(b_line):
display.debug(u"become_error: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_error'] = True
elif sudoable and self.become.check_missing_password(b_line):
display.debug(u"become_nopasswd_error: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_nopasswd_error'] = True
if not suppress_output:
output.append(b_line)
# The chunk we read was most likely a series of complete lines, but just
# in case the last line was incomplete (and not a prompt, which we would
# have removed from the output), we retain it to be processed with the
# next chunk.
remainder = b''
if output and not output[-1].endswith(b'\n'):
remainder = output[-1]
output = output[:-1]
return b''.join(output), remainder
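    # Sketch, assuming the become plugin's prompt regex matches 'Password:':
    #   _examine_output('stdout', 'awaiting_prompt', b'Password: \npartial', True)
    # would set self._flags['become_prompt'], suppress the prompt line, and
    # return (b'', b'partial') so the incomplete line is re-examined together
    # with the next chunk.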
def _bare_run(self, cmd, in_data, sudoable=True, checkrc=True):
'''
Starts the command and communicates with it until it ends.
'''
# We don't use _shell.quote as this is run on the controller and independent from the shell plugin chosen
display_cmd = u' '.join(shlex.quote(to_text(c)) for c in cmd)
display.vvv(u'SSH: EXEC {0}'.format(display_cmd), host=self.host)
# Start the given command. If we don't need to pipeline data, we can try
# to use a pseudo-tty (ssh will have been invoked with -tt). If we are
# pipelining data, or can't create a pty, we fall back to using plain
# old pipes.
p = None
if isinstance(cmd, (text_type, binary_type)):
cmd = to_bytes(cmd)
else:
cmd = list(map(to_bytes, cmd))
conn_password = self.get_option('password') or self._play_context.password
if not in_data:
try:
# Make sure stdin is a proper pty to avoid tcgetattr errors
master, slave = pty.openpty()
if PY3 and conn_password:
# pylint: disable=unexpected-keyword-arg
p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe)
else:
p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdin = os.fdopen(master, 'wb', 0)
os.close(slave)
except (OSError, IOError):
p = None
if not p:
try:
if PY3 and conn_password:
# pylint: disable=unexpected-keyword-arg
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe)
else:
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdin = p.stdin
except (OSError, IOError) as e:
raise AnsibleError('Unable to execute ssh command line on a controller due to: %s' % to_native(e))
# If we are using SSH password authentication, write the password into
# the pipe we opened in _build_command.
if conn_password:
os.close(self.sshpass_pipe[0])
try:
os.write(self.sshpass_pipe[1], to_bytes(conn_password) + b'\n')
except OSError as e:
# Ignore broken pipe errors if the sshpass process has exited.
if e.errno != errno.EPIPE or p.poll() is None:
raise
os.close(self.sshpass_pipe[1])
#
# SSH state machine
#
# Now we read and accumulate output from the running process until it
# exits. Depending on the circumstances, we may also need to write an
# escalation password and/or pipelined input to the process.
states = [
'awaiting_prompt', 'awaiting_escalation', 'ready_to_send', 'awaiting_exit'
]
# Are we requesting privilege escalation? Right now, we may be invoked
# to execute sftp/scp with sudoable=True, but we can request escalation
# only when using ssh. Otherwise we can send initial data straightaway.
state = states.index('ready_to_send')
if to_bytes(self.get_option('ssh_executable')) in cmd and sudoable:
prompt = getattr(self.become, 'prompt', None)
if prompt:
# We're requesting escalation with a password, so we have to
# wait for a password prompt.
state = states.index('awaiting_prompt')
display.debug(u'Initial state: %s: %s' % (states[state], to_text(prompt)))
elif self.become and self.become.success:
# We're requesting escalation without a password, so we have to
# detect success/failure before sending any initial data.
state = states.index('awaiting_escalation')
display.debug(u'Initial state: %s: %s' % (states[state], to_text(self.become.success)))
# We store accumulated stdout and stderr output from the process here,
# but strip any privilege escalation prompt/confirmation lines first.
# Output is accumulated into tmp_*, complete lines are extracted into
# an array, then checked and removed or copied to stdout or stderr. We
# set any flags based on examining the output in self._flags.
b_stdout = b_stderr = b''
b_tmp_stdout = b_tmp_stderr = b''
self._flags = dict(
become_prompt=False, become_success=False,
become_error=False, become_nopasswd_error=False
)
# select timeout should be longer than the connect timeout, otherwise
# they will race each other when we can't connect, and the connect
# timeout usually fails
timeout = 2 + self.get_option('timeout')
for fd in (p.stdout, p.stderr):
fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        # TODO: bcoca would like to use SelectSelector() when the number of open
        # filehandles is low; select is faster in that case and we only ever handle 1.
selector = selectors.DefaultSelector()
selector.register(p.stdout, selectors.EVENT_READ)
selector.register(p.stderr, selectors.EVENT_READ)
# If we can send initial data without waiting for anything, we do so
# before we start polling
if states[state] == 'ready_to_send' and in_data:
self._send_initial_data(stdin, in_data, p)
state += 1
try:
while True:
poll = p.poll()
events = selector.select(timeout)
# We pay attention to timeouts only while negotiating a prompt.
if not events:
# We timed out
if state <= states.index('awaiting_escalation'):
# If the process has already exited, then it's not really a
# timeout; we'll let the normal error handling deal with it.
if poll is not None:
break
self._terminate_process(p)
raise AnsibleError('Timeout (%ds) waiting for privilege escalation prompt: %s' % (timeout, to_native(b_stdout)))
# Read whatever output is available on stdout and stderr, and stop
# listening to the pipe if it's been closed.
for key, event in events:
if key.fileobj == p.stdout:
b_chunk = p.stdout.read()
if b_chunk == b'':
# stdout has been closed, stop watching it
selector.unregister(p.stdout)
# When ssh has ControlMaster (+ControlPath/Persist) enabled, the
# first connection goes into the background and we never see EOF
# on stderr. If we see EOF on stdout, lower the select timeout
# to reduce the time wasted selecting on stderr if we observe
                            # that the process has not yet exited after this EOF. Otherwise
# we may spend a long timeout period waiting for an EOF that is
# not going to arrive until the persisted connection closes.
timeout = 1
b_tmp_stdout += b_chunk
display.debug(u"stdout chunk (state=%s):\n>>>%s<<<\n" % (state, to_text(b_chunk)))
elif key.fileobj == p.stderr:
b_chunk = p.stderr.read()
if b_chunk == b'':
# stderr has been closed, stop watching it
selector.unregister(p.stderr)
b_tmp_stderr += b_chunk
display.debug("stderr chunk (state=%s):\n>>>%s<<<\n" % (state, to_text(b_chunk)))
# We examine the output line-by-line until we have negotiated any
# privilege escalation prompt and subsequent success/error message.
# Afterwards, we can accumulate output without looking at it.
if state < states.index('ready_to_send'):
if b_tmp_stdout:
b_output, b_unprocessed = self._examine_output('stdout', states[state], b_tmp_stdout, sudoable)
b_stdout += b_output
b_tmp_stdout = b_unprocessed
if b_tmp_stderr:
b_output, b_unprocessed = self._examine_output('stderr', states[state], b_tmp_stderr, sudoable)
b_stderr += b_output
b_tmp_stderr = b_unprocessed
else:
b_stdout += b_tmp_stdout
b_stderr += b_tmp_stderr
b_tmp_stdout = b_tmp_stderr = b''
# If we see a privilege escalation prompt, we send the password.
# (If we're expecting a prompt but the escalation succeeds, we
# didn't need the password and can carry on regardless.)
if states[state] == 'awaiting_prompt':
if self._flags['become_prompt']:
display.debug(u'Sending become_password in response to prompt')
become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
# On python3 stdin is a BufferedWriter, and we don't have a guarantee
# that the write will happen without a flush
stdin.flush()
self._flags['become_prompt'] = False
state += 1
elif self._flags['become_success']:
state += 1
# We've requested escalation (with or without a password), now we
# wait for an error message or a successful escalation.
if states[state] == 'awaiting_escalation':
if self._flags['become_success']:
display.vvv(u'Escalation succeeded')
self._flags['become_success'] = False
state += 1
elif self._flags['become_error']:
display.vvv(u'Escalation failed')
self._terminate_process(p)
self._flags['become_error'] = False
raise AnsibleError('Incorrect %s password' % self.become.name)
elif self._flags['become_nopasswd_error']:
display.vvv(u'Escalation requires password')
self._terminate_process(p)
self._flags['become_nopasswd_error'] = False
raise AnsibleError('Missing %s password' % self.become.name)
elif self._flags['become_prompt']:
# This shouldn't happen, because we should see the "Sorry,
# try again" message first.
display.vvv(u'Escalation prompt repeated')
self._terminate_process(p)
self._flags['become_prompt'] = False
raise AnsibleError('Incorrect %s password' % self.become.name)
# Once we're sure that the privilege escalation prompt, if any, has
# been dealt with, we can send any initial data and start waiting
# for output.
if states[state] == 'ready_to_send':
if in_data:
self._send_initial_data(stdin, in_data, p)
state += 1
# Now we're awaiting_exit: has the child process exited? If it has,
# and we've read all available output from it, we're done.
if poll is not None:
if not selector.get_map() or not events:
break
# We should not see further writes to the stdout/stderr file
# descriptors after the process has closed, set the select
# timeout to gather any last writes we may have missed.
timeout = 0
continue
# If the process has not yet exited, but we've already read EOF from
# its stdout and stderr (and thus no longer watching any file
# descriptors), we can just wait for it to exit.
elif not selector.get_map():
p.wait()
break
# Otherwise there may still be outstanding data to read.
finally:
selector.close()
# close stdin, stdout, and stderr after process is terminated and
# stdout/stderr are read completely (see also issues #848, #64768).
stdin.close()
p.stdout.close()
p.stderr.close()
if self.get_option('host_key_checking'):
if cmd[0] == b"sshpass" and p.returncode == 6:
                raise AnsibleError('Using an SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support '
'this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
controlpersisterror = b'Bad configuration option: ControlPersist' in b_stderr or b'unknown configuration option: ControlPersist' in b_stderr
if p.returncode != 0 and controlpersisterror:
raise AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" '
'(or ssh_args in [ssh_connection] section of the config file) before running again')
# If we find a broken pipe because of ControlPersist timeout expiring (see #16731),
# we raise a special exception so that we can retry a connection.
controlpersist_broken_pipe = b'mux_client_hello_exchange: write packet: Broken pipe' in b_stderr
if p.returncode == 255:
additional = to_native(b_stderr)
if controlpersist_broken_pipe:
raise AnsibleControlPersistBrokenPipeError('Data could not be sent because of ControlPersist broken pipe: %s' % additional)
elif in_data and checkrc:
raise AnsibleConnectionFailure('Data could not be sent to remote host "%s". Make sure this host can be reached over ssh: %s'
% (self.host, additional))
return (p.returncode, b_stdout, b_stderr)
@_ssh_retry
def _run(self, cmd, in_data, sudoable=True, checkrc=True):
"""Wrapper around _bare_run that retries the connection
"""
return self._bare_run(cmd, in_data, sudoable=sudoable, checkrc=checkrc)
@_ssh_retry
def _file_transport_command(self, in_path, out_path, sftp_action):
# scp and sftp require square brackets for IPv6 addresses, but
# accept them for hostnames and IPv4 addresses too.
host = '[%s]' % self.host
smart_methods = ['sftp', 'scp', 'piped']
# Windows does not support dd so we cannot use the piped method
if getattr(self._shell, "_IS_WINDOWS", False):
smart_methods.remove('piped')
# Transfer methods to try
methods = []
# Use the transfer_method option if set, otherwise use scp_if_ssh
ssh_transfer_method = self.get_option('ssh_transfer_method')
scp_if_ssh = self.get_option('scp_if_ssh')
if ssh_transfer_method is None and scp_if_ssh == 'smart':
ssh_transfer_method = 'smart'
if ssh_transfer_method is not None:
if ssh_transfer_method == 'smart':
methods = smart_methods
else:
methods = [ssh_transfer_method]
else:
# since this can be a non-bool now, we need to handle it correctly
if not isinstance(scp_if_ssh, bool):
scp_if_ssh = scp_if_ssh.lower()
if scp_if_ssh in BOOLEANS:
scp_if_ssh = boolean(scp_if_ssh, strict=False)
elif scp_if_ssh != 'smart':
raise AnsibleOptionsError('scp_if_ssh needs to be one of [smart|True|False]')
if scp_if_ssh == 'smart':
methods = smart_methods
elif scp_if_ssh is True:
methods = ['scp']
else:
methods = ['sftp']
for method in methods:
returncode = stdout = stderr = None
if method == 'sftp':
cmd = self._build_command(self.get_option('sftp_executable'), 'sftp', to_bytes(host))
in_data = u"{0} {1} {2}\n".format(sftp_action, shlex.quote(in_path), shlex.quote(out_path))
in_data = to_bytes(in_data, nonstring='passthru')
(returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
elif method == 'scp':
scp = self.get_option('scp_executable')
if sftp_action == 'get':
cmd = self._build_command(scp, 'scp', u'{0}:{1}'.format(host, self._shell.quote(in_path)), out_path)
else:
cmd = self._build_command(scp, 'scp', in_path, u'{0}:{1}'.format(host, self._shell.quote(out_path)))
in_data = None
(returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
elif method == 'piped':
if sftp_action == 'get':
# we pass sudoable=False to disable pty allocation, which
# would end up mixing stdout/stderr and screwing with newlines
(returncode, stdout, stderr) = self.exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), sudoable=False)
with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
out_file.write(stdout)
else:
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as f:
in_data = to_bytes(f.read(), nonstring='passthru')
if not in_data:
count = ' count=0'
else:
count = ''
(returncode, stdout, stderr) = self.exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), in_data=in_data, sudoable=False)
# Check the return code and rollover to next method if failed
if returncode == 0:
return (returncode, stdout, stderr)
else:
# If not in smart mode, the data will be printed by the raise below
if len(methods) > 1:
display.warning(u'%s transfer mechanism failed on %s. Use ANSIBLE_DEBUG=1 to see detailed information' % (method, host))
display.debug(u'%s' % to_text(stdout))
display.debug(u'%s' % to_text(stderr))
if returncode == 255:
raise AnsibleConnectionFailure("Failed to connect to the host via %s: %s" % (method, to_native(stderr)))
else:
raise AnsibleError("failed to transfer file to %s %s:\n%s\n%s" %
(to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
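    # Method selection sketch: an explicit ssh_transfer_method wins; otherwise
    # the deprecated scp_if_ssh is honoured. For example, on a POSIX target:
    #   ssh_transfer_method='smart'  -> methods == ['sftp', 'scp', 'piped']
    #   scp_if_ssh=True              -> methods == ['scp']
    # and the first method that returns 0 is used.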
def _escape_win_path(self, path):
""" converts a Windows path to one that's supported by SFTP and SCP """
# If using a root path then we need to start with /
prefix = ""
if re.match(r'^\w{1}:', path):
prefix = "/"
# Convert all '\' to '/'
return "%s%s" % (prefix, path.replace("\\", "/"))
#
# Main public methods
#
def exec_command(self, cmd, in_data=None, sudoable=True):
''' run a command on the remote host '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
display.vvv(u"ESTABLISH SSH CONNECTION FOR USER: {0}".format(self.user), host=self.host)
if getattr(self._shell, "_IS_WINDOWS", False):
# Become method 'runas' is done in the wrapper that is executed,
# need to disable sudoable so the bare_run is not waiting for a
# prompt that will not occur
sudoable = False
# Make sure our first command is to set the console encoding to
# utf-8, this must be done via chcp to get utf-8 (65001)
cmd_parts = ["chcp.com", "65001", self._shell._SHELL_REDIRECT_ALLNULL, self._shell._SHELL_AND]
cmd_parts.extend(self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False))
cmd = ' '.join(cmd_parts)
# we can only use tty when we are not pipelining the modules. piping
# data into /usr/bin/python inside a tty automatically invokes the
# python interactive-mode but the modules are not compatible with the
# interactive-mode ("unexpected indent" mainly because of empty lines)
ssh_executable = self.get_option('ssh_executable')
# -tt can cause various issues in some environments so allow the user
# to disable it as a troubleshooting method.
use_tty = self.get_option('use_tty')
if not in_data and sudoable and use_tty:
args = ('-tt', self.host, cmd)
else:
args = (self.host, cmd)
cmd = self._build_command(ssh_executable, 'ssh', *args)
(returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable)
# When running on Windows, stderr may contain CLIXML encoded output
if getattr(self._shell, "_IS_WINDOWS", False) and stderr.startswith(b"#< CLIXML"):
stderr = _parse_clixml(stderr)
return (returncode, stdout, stderr)
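# Illustrative note (added): CLIXML is PowerShell's serialized stream format;
# stderr from a Windows host typically starts with something like
#   #< CLIXML
#   <Objs Version="1.1.0.1" xmlns="http://schemas.microsoft.com/powershell/2004/04">...
# which _parse_clixml() above converts back into plain text.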
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
super(Connection, self).put_file(in_path, out_path)
display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.host)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_native(in_path)))
if getattr(self._shell, "_IS_WINDOWS", False):
out_path = self._escape_win_path(out_path)
return self._file_transport_command(in_path, out_path, 'put')
def fetch_file(self, in_path, out_path):
''' fetch a file from remote to local '''
super(Connection, self).fetch_file(in_path, out_path)
display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
# need to add / if path is rooted
if getattr(self._shell, "_IS_WINDOWS", False):
in_path = self._escape_win_path(in_path)
return self._file_transport_command(in_path, out_path, 'get')
def reset(self):
run_reset = False
# If we have a persistent ssh connection (ControlPersist), we can ask it to stop listening.
# only run the reset if the ControlPath already exists or if it isn't configured and ControlPersist is set
# 'check' will determine this.
cmd = self._build_command(self.get_option('ssh_executable'), 'ssh', '-O', 'check', self.host)
display.vvv(u'sending connection check: %s' % to_text(cmd))
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
status_code = p.wait()
if status_code != 0:
display.vvv(u"No connection to reset: %s" % to_text(stderr))
else:
run_reset = True
if run_reset:
cmd = self._build_command(self.get_option('ssh_executable'), 'ssh', '-O', 'stop', self.host)
display.vvv(u'sending connection stop: %s' % to_text(cmd))
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
status_code = p.wait()
if status_code != 0:
display.warning(u"Failed to reset connection:%s" % to_text(stderr))
self.close()
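# Illustrative note (added): '-O check' and '-O stop' are OpenSSH multiplexing
# control commands sent to the ControlPersist master process, roughly:
#   ssh -O check -o ControlPath=<path> <host>   # is a master alive for this path?
#   ssh -O stop  -o ControlPath=<path> <host>   # ask it to stop accepting sessions
# so reset() only tears down an existing persistent connection; it never opens one.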
def close(self):
self._connected = False
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76663 |
reboot module hangs when the verbosity is set to connection debug (-vvvv)
|
### Summary
Even though the reboot module performs as expected at verbosity 3 (`-vvv`), when I set it to 4 (`-vvvv`) it hangs. I tested 2.9.16, 2.9.27 and 2.12.0.
I specified `become_method=su`. With `become_method=sudo`, it times out after 5 minutes and the job then proceeds as expected.
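For context, here is a minimal sketch (my own simplification, not the actual plugin code) of how the ssh connection plugin appears to forward extra verbosity to the ssh client; note that `-vvv` shows up in the `SSH: EXEC` lines below only for the verbosity-4 run:

```python
# illustrative only: names and structure are assumptions, not ansible's code
def build_ssh_args(ansible_verbosity):
    args = ['ssh', '-C', '-o', 'ControlMaster=auto', '-o', 'ControlPersist=60s']
    if ansible_verbosity > 3:   # -vvvv and higher
        args.append('-vvv')     # ssh client debug output is emitted on stderr
    return args
```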
### Issue Type
Bug Report
### Component Name
reboot
### Ansible Version
```console
### RHEL 8.2
$ ansible --version
ansible 2.9.16
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/sugimura/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.6/site-packages/ansible
executable location = /usr/bin/ansible
python version = 3.6.8 (default, Dec 5 2019, 15:45:45) [GCC 8.3.1 20191121 (Red Hat 8.3.1-5)]
### RHEL 8.5
$ ansible --version
ansible [core 2.12.0]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/sugimura/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
ansible collection location = /home/sugimura/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.8.8 (default, Aug 11 2021, 06:52:42) [GCC 8.5.0 20210514 (Red Hat 8.5.0-3)]
jinja version = 2.10.3
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
$
(nothing special)
```
### OS / Environment
RHEL 8.2 and 8.5 as control nodes. The managed nodes I tested are also RHEL 8.2 and 8.5.
### Steps to Reproduce
### playbook.yml
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
---
- hosts: all
gather_facts: false
become: true
tasks:
- name: reboot
reboot:
post_reboot_delay: 30
```
### inventory
```
$ cat inventory
### RHEL 8.2
192.168.0.155 ansible_become_method=su
#192.168.0.155 ansible_become_method=sudo
### RHEL 8.5
#192.168.0.154 ansible_become_method=su
#192.168.0.154 ansible_become_method=sudo
#192.168.0.154 ansible_become_method=su ansible_become_password=XXXXXXXX
#192.168.0.154 ansible_become_method=sudo ansible_become_password=XXXXXXXX
```
### command line
When `-vvvv` is specified, it hangs.
```
$ ansible-playbook -i inventory playbook.yml -k -K -vvvv
```
With `-vvv`, on the other hand, it runs successfully.
```
$ ansible-playbook -i inventory playbook.yml -k -K -vvv
```
### Expected Results
It should show the same behavior as with `-vvv` below.
```
$ ansible-playbook -i inventory playbook.yml -k -K -vvv
ansible-playbook [core 2.12.0]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/sugimura/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
ansible collection location = /home/sugimura/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible-playbook
python version = 3.8.8 (default, Aug 11 2021, 06:52:42) [GCC 8.5.0 20210514 (Red Hat 8.5.0-3)]
jinja version = 2.10.3
libyaml = True
Using /etc/ansible/ansible.cfg as config file
SSH password:
BECOME password[defaults to SSH password]:
...
Escalation succeeded
<192.168.0.155> (0, b'\r\nroot\r\n', b'Shared connection to 192.168.0.155 closed.\r\n')
reboot: system successfully rebooted
<192.168.0.155> ESTABLISH SSH CONNECTION FOR USER: None
<192.168.0.155> SSH: EXEC sshpass -d10 ssh -C -o ControlMaster=auto -o ControlPersist=60s -o ConnectTimeout=10 -o ControlPath=/home/sugimura/.ansible/cp/23220a1dab 192.168.0.155 '/bin/sh -c '"'"'rm -f -r /home/sugimura/.ansible/tmp/ansible-tmp-1641436348.6996555-196425-83290895724414/ > /dev/null 2>&1 && sleep 0'"'"''
<192.168.0.155> (0, b'', b'')
changed: [192.168.0.155] => {
"changed": true,
"elapsed": 30,
"rebooted": true
}
META: ran handlers
META: ran handlers
PLAY RECAP *******************************************************************************************************************************************************************
192.168.0.155 : ok=1 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Actual Results
It hangs.
```console
$ ansible-playbook -i inventory playbook.yml -k -K -vvvv
ansible-playbook [core 2.12.0]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/sugimura/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
ansible collection location = /home/sugimura/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible-playbook
python version = 3.8.8 (default, Aug 11 2021, 06:52:42) [GCC 8.5.0 20210514 (Red Hat 8.5.0-3)]
jinja version = 2.10.3
libyaml = True
Using /etc/ansible/ansible.cfg as config file
SSH password:
BECOME password[defaults to SSH password]:
setting up inventory plugins
...
reboot: rebooting server...
<192.168.0.155> ESTABLISH SSH CONNECTION FOR USER: None
<192.168.0.155> SSH: EXEC sshpass -d10 ssh -vvv -C -o ControlMaster=auto -o ControlPersist=60s -o ConnectTimeout=10 -o ControlPath=/home/sugimura/.ansible/cp/23220a1dab -tt 192.168.0.155 '/bin/sh -c '"'"'su root -c '"'"'"'"'"'"'"'"'/bin/sh -c '"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-vqowuwkomqakrqejoekqdcsybawubrge ; /sbin/shutdown -r 0 "Reboot initiated by Ansible"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"''"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
<192.168.0.155> (0, b"\r\nShutdown scheduled for Thu 2022-01-06 11:35:37 JST, use 'shutdown -c' to cancel.\r\n", b"OpenSSH_8.0p1, OpenSSL 1.1.1k FIPS 25 Mar 2021\r\ndebug1: Reading configuration data /etc/ssh/ssh_config\r\ndebug3: /etc/ssh/ssh_config line 52: Including file /etc/ssh/ssh_config.d/05-redhat.conf depth 0\r\ndebug1: Reading configuration data /etc/ssh/ssh_config.d/05-redhat.conf\r\ndebug2: checking match for 'final all' host 192.168.0.155 originally 192.168.0.155\r\ndebug3: /etc/ssh/ssh_config.d/05-redhat.conf line 3: not matched 'final'\r\ndebug2: match not found\r\ndebug3: /etc/ssh/ssh_config.d/05-redhat.conf line 5: Including file /etc/crypto-policies/back-ends/openssh.config depth 1 (parse only)\r\ndebug1: Reading configuration data /etc/crypto-policies/back-ends/openssh.config\r\ndebug3: gss kex names ok: [gss-curve25519-sha256-,gss-nistp256-sha256-,gss-group14-sha256-,gss-group16-sha512-,gss-gex-sha1-,gss-group14-sha1-]\r\ndebug3: kex names ok: [curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group14-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha1]\r\ndebug1: configuration requests final Match pass\r\ndebug2: resolve_canonicalize: hostname 192.168.0.155 is address\r\ndebug1: re-parsing configuration\r\ndebug1: Reading configuration data /etc/ssh/ssh_config\r\ndebug3: /etc/ssh/ssh_config line 52: Including file /etc/ssh/ssh_config.d/05-redhat.conf depth 0\r\ndebug1: Reading configuration data /etc/ssh/ssh_config.d/05-redhat.conf\r\ndebug2: checking match for 'final all' host 192.168.0.155 originally 192.168.0.155\r\ndebug3: /etc/ssh/ssh_config.d/05-redhat.conf line 3: matched 'final'\r\ndebug2: match found\r\ndebug3: /etc/ssh/ssh_config.d/05-redhat.conf line 5: Including file /etc/crypto-policies/back-ends/openssh.config depth 1\r\ndebug1: Reading configuration data /etc/crypto-policies/back-ends/openssh.config\r\ndebug3: gss kex names ok: [gss-curve25519-sha256-,gss-nistp256-sha256-,gss-group14-sha256-,gss-group16-sha512-,gss-gex-sha1-,gss-group14-sha1-]\r\ndebug3: kex names ok: [curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group14-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha1]\r\ndebug1: auto-mux: Trying existing master\r\ndebug2: fd 4 setting O_NONBLOCK\r\ndebug2: mux_client_hello_exchange: master version 4\r\ndebug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r\ndebug3: mux_client_request_session: entering\r\ndebug3: mux_client_request_alive: entering\r\ndebug3: mux_client_request_alive: done pid = 196492\r\ndebug3: mux_client_request_session: session request sent\r\ndebug3: mux_client_read_packet: read header failed: Broken pipe\r\ndebug2: Received exit status from master 0\r\nShared connection to 192.168.0.155 closed.\r\n")
reboot: waiting an additional 30 seconds
reboot: validating reboot
reboot: attempting to get system boot time
<192.168.0.155> ESTABLISH SSH CONNECTION FOR USER: None
<192.168.0.155> SSH: EXEC sshpass -d10 ssh -vvv -C -o ControlMaster=auto -o ControlPersist=60s -o ConnectTimeout=10 -o ControlPath=/home/sugimura/.ansible/cp/23220a1dab -tt 192.168.0.155 '/bin/sh -c '"'"'su root -c '"'"'"'"'"'"'"'"'/bin/sh -c '"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-fwhdcdmwvpurpzythdrmsmpewqactjox ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"'"''"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
```
And here is the output with `ANSIBLE_DEBUG=1`.
```
$ ANSIBLE_DEBUG=1 ansible-playbook -i inventory playbook.yml -k -K -vvvv
196531 1641436626.42119: starting run
ansible-playbook [core 2.12.0]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/sugimura/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
ansible collection location = /home/sugimura/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible-playbook
python version = 3.8.8 (default, Aug 11 2021, 06:52:42) [GCC 8.5.0 20210514 (Red Hat 8.5.0-3)]
jinja version = 2.10.3
libyaml = True
Using /etc/ansible/ansible.cfg as config file
SSH password:
BECOME password[defaults to SSH password]:
196531 1641436629.13508: Added group all to inventory
196531 1641436629.13525: Added group ungrouped to inventory
196531 1641436629.13535: Group all now contains ungrouped
...
196536 1641436660.99666: become_success: (source=stderr, state=awaiting_prompt): 'debug2: mux_master_process_new_session: channel 1: request tty 1, X 0, agent 0, subsys 0, term "xterm-256color", cmd "/bin/sh -c 'su root -c '"'"'/bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-ozdcnvzirtcdftqbszityhfgcukzjzoe ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"''"'"' && sleep 0'", env 1'
Escalation succeeded
196536 1641436661.18745: stderr chunk (state=3):
>>>debug3: receive packet: type 80
debug1: client_input_global_request: rtype [email protected] want_reply 0
<<<
196536 1641436661.22876: stderr chunk (state=3):
>>>debug3: receive packet: type 91
debug2: channel_input_open_confirmation: channel 2: callback start
debug2: client_session2_setup: id 2
debug2: channel 2: request pty-req confirm 1
debug3: send packet: type 98
<<<
196536 1641436661.22899: stderr chunk (state=3):
>>>debug1: Sending environment.
debug1: Sending env LANG = ja_JP.UTF-8
debug2: channel 2: request env confirm 0
debug3: send packet: type 98
debug1: Sending command: /bin/sh -c 'su root -c '"'"'/bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-ozdcnvzirtcdftqbszityhfgcukzjzoe ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"''"'"' && sleep 0'
debug2: channel 2: request exec confirm 1
debug3: send packet: type 98
debug3: mux_session_confirm: sending success reply
debug2: channel_input_open_confirmation: channel 2: callback done
debug2: channel 2: open confirm rwindow 0 rmax 32768
<<<
196536 1641436661.23997: stderr chunk (state=3):
>>>debug3: receive packet: type 99
debug2: channel_input_status_confirm: type 99 id 2
debug2: PTY allocation request accepted on channel 2
debug2: channel 2: rcvd adjust 2097152
debug3: receive packet: type 99
debug2: channel_input_status_confirm: type 99 id 2
debug2: exec request accepted on channel 2
<<<
196536 1641436661.61510: stdout chunk (state=3):
>>>パスワード:<<<
```
`パスワード:` means `Password:` in Japanese. I also tested with `LANG=C`; the behavior is the same.
```
...
debug1: Sending command: /bin/sh -c 'su root -c '"'"'/bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-xmjdcyhcajgoprxvrmtkyqdyufnzfdeo ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"''"'"' && sleep 0'
debug2: channel 2: request exec confirm 1
debug3: send packet: type 98
<<<
196581 1641436784.82305: stderr chunk (state=3):
>>>debug3: mux_session_confirm: sending success reply
debug2: channel_input_open_confirmation: channel 2: callback done
debug2: channel 2: open confirm rwindow 0 rmax 32768
<<<
196581 1641436784.83332: stderr chunk (state=3):
>>>debug3: receive packet: type 99
debug2: channel_input_status_confirm: type 99 id 2
debug2: PTY allocation request accepted on channel 2
debug2: channel 2: rcvd adjust 2097152
debug3: receive packet: type 99
debug2: channel_input_status_confirm: type 99 id 2
debug2: exec request accepted on channel 2
<<<
196581 1641436785.19981: stdout chunk (state=3):
>>>Password: <<<
```
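For what it's worth, here is a rough sketch (my assumption, not the real `su` become plugin) of how localized prompt detection could work; if that check is missed while the extra `-vvv` ssh debug output is interleaved on stderr, `su` just sits at `パスワード:`/`Password:` forever:

```python
import re

# hypothetical list, modeled on the localized prompts su is known to print
SU_PROMPT_LOCALIZATIONS = [u'Password', u'パスワード', u'Contraseña', u'Mot de passe']

def check_password_prompt(b_output):
    # True when any known localized su prompt appears in the remote output
    b_pattern = b'|'.join(re.escape(p.encode('utf-8'))
                          for p in SU_PROMPT_LOCALIZATIONS)
    return re.search(b_pattern, b_output) is not None
```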
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76663
|
https://github.com/ansible/ansible/pull/76732
|
9142be2f6cabbe6597c9254c5bb9186d17036d55
|
0ff80a15ba40c2aff3b96c1152f19c97a92d3c97
| 2022-01-06T02:40:31Z |
python
| 2022-01-13T21:28:09Z |
test/units/plugins/connection/test_ssh.py
|
# -*- coding: utf-8 -*-
# (c) 2015, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from io import StringIO
import pytest
from ansible import constants as C
from ansible.errors import AnsibleAuthenticationFailure
from units.compat import unittest
from units.compat.mock import patch, MagicMock, PropertyMock
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.module_utils.compat.selectors import SelectorKey, EVENT_READ
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes
from ansible.playbook.play_context import PlayContext
from ansible.plugins.connection import ssh
from ansible.plugins.loader import connection_loader, become_loader
class TestConnectionBaseClass(unittest.TestCase):
def test_plugins_connection_ssh_module(self):
play_context = PlayContext()
play_context.prompt = (
'[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: '
)
in_stream = StringIO()
self.assertIsInstance(ssh.Connection(play_context, in_stream), ssh.Connection)
def test_plugins_connection_ssh_basic(self):
pc = PlayContext()
new_stdin = StringIO()
conn = ssh.Connection(pc, new_stdin)
# connect just returns self, so assert that
res = conn._connect()
self.assertEqual(conn, res)
ssh.SSHPASS_AVAILABLE = False
self.assertFalse(conn._sshpass_available())
ssh.SSHPASS_AVAILABLE = True
self.assertTrue(conn._sshpass_available())
with patch('subprocess.Popen') as p:
ssh.SSHPASS_AVAILABLE = None
p.return_value = MagicMock()
self.assertTrue(conn._sshpass_available())
ssh.SSHPASS_AVAILABLE = None
p.return_value = None
p.side_effect = OSError()
self.assertFalse(conn._sshpass_available())
conn.close()
self.assertFalse(conn._connected)
def test_plugins_connection_ssh__build_command(self):
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('ssh', pc, new_stdin)
conn.get_option = MagicMock()
conn.get_option.return_value = ""
conn._build_command('ssh', 'ssh')
def test_plugins_connection_ssh_exec_command(self):
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('ssh', pc, new_stdin)
conn._build_command = MagicMock()
conn._build_command.return_value = 'ssh something something'
conn._run = MagicMock()
conn._run.return_value = (0, 'stdout', 'stderr')
conn.get_option = MagicMock()
conn.get_option.return_value = True
res, stdout, stderr = conn.exec_command('ssh')
res, stdout, stderr = conn.exec_command('ssh', 'this is some data')
def test_plugins_connection_ssh__examine_output(self):
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('ssh', pc, new_stdin)
conn.set_become_plugin(become_loader.get('sudo'))
conn.become.check_password_prompt = MagicMock()
conn.become.check_success = MagicMock()
conn.become.check_incorrect_password = MagicMock()
conn.become.check_missing_password = MagicMock()
def _check_password_prompt(line):
if b'foo' in line:
return True
return False
def _check_become_success(line):
if b'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz' in line:
return True
return False
def _check_incorrect_password(line):
if b'incorrect password' in line:
return True
return False
def _check_missing_password(line):
if b'bad password' in line:
return True
return False
# test examining output for prompt
conn._flags = dict(
become_prompt=False,
become_success=False,
become_error=False,
become_nopasswd_error=False,
)
pc.prompt = True
# override become plugin
conn.become.prompt = True
conn.become.check_password_prompt = MagicMock(side_effect=_check_password_prompt)
conn.become.check_success = MagicMock(side_effect=_check_become_success)
conn.become.check_incorrect_password = MagicMock(side_effect=_check_incorrect_password)
conn.become.check_missing_password = MagicMock(side_effect=_check_missing_password)
def get_option(option):
if option == 'become_pass':
return 'password'
return None
conn.become.get_option = get_option
output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nfoo\nline 3\nthis should be the remainder', False)
self.assertEqual(output, b'line 1\nline 2\nline 3\n')
self.assertEqual(unprocessed, b'this should be the remainder')
self.assertTrue(conn._flags['become_prompt'])
self.assertFalse(conn._flags['become_success'])
self.assertFalse(conn._flags['become_error'])
self.assertFalse(conn._flags['become_nopasswd_error'])
# test examining output for become prompt
conn._flags = dict(
become_prompt=False,
become_success=False,
become_error=False,
become_nopasswd_error=False,
)
pc.prompt = False
conn.become.prompt = False
pc.success_key = u'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz'
conn.become.success = u'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz'
output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nBECOME-SUCCESS-abcdefghijklmnopqrstuvxyz\nline 3\n', False)
self.assertEqual(output, b'line 1\nline 2\nline 3\n')
self.assertEqual(unprocessed, b'')
self.assertFalse(conn._flags['become_prompt'])
self.assertTrue(conn._flags['become_success'])
self.assertFalse(conn._flags['become_error'])
self.assertFalse(conn._flags['become_nopasswd_error'])
# test examining output for become failure
conn._flags = dict(
become_prompt=False,
become_success=False,
become_error=False,
become_nopasswd_error=False,
)
pc.prompt = False
conn.become.prompt = False
pc.success_key = None
output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nincorrect password\n', True)
self.assertEqual(output, b'line 1\nline 2\nincorrect password\n')
self.assertEqual(unprocessed, b'')
self.assertFalse(conn._flags['become_prompt'])
self.assertFalse(conn._flags['become_success'])
self.assertTrue(conn._flags['become_error'])
self.assertFalse(conn._flags['become_nopasswd_error'])
# test examining output for missing password
conn._flags = dict(
become_prompt=False,
become_success=False,
become_error=False,
become_nopasswd_error=False,
)
pc.prompt = False
conn.become.prompt = False
pc.success_key = None
output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nbad password\n', True)
self.assertEqual(output, b'line 1\nbad password\n')
self.assertEqual(unprocessed, b'')
self.assertFalse(conn._flags['become_prompt'])
self.assertFalse(conn._flags['become_success'])
self.assertFalse(conn._flags['become_error'])
self.assertTrue(conn._flags['become_nopasswd_error'])
@patch('time.sleep')
@patch('os.path.exists')
def test_plugins_connection_ssh_put_file(self, mock_ospe, mock_sleep):
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('ssh', pc, new_stdin)
conn._build_command = MagicMock()
conn._bare_run = MagicMock()
mock_ospe.return_value = True
conn._build_command.return_value = 'some command to run'
conn._bare_run.return_value = (0, '', '')
conn.host = "some_host"
conn.set_option('reconnection_retries', 9)
conn.set_option('ssh_transfer_method', None) # unless set to None scp_if_ssh is ignored
# Test with SCP_IF_SSH set to smart
# Test when SFTP works
conn.set_option('scp_if_ssh', 'smart')
expected_in_data = b' '.join((b'put', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n'
conn.put_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
# Test when SFTP doesn't work but SCP does
conn._bare_run.side_effect = [(1, 'stdout', 'some errors'), (0, '', '')]
conn.put_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
conn._bare_run.side_effect = None
# test with SCP_IF_SSH enabled
conn.set_option('scp_if_ssh', True)
conn.put_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
conn.put_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
# test with SCP_IF_SSH disabled
conn.set_option('scp_if_ssh', False)
expected_in_data = b' '.join((b'put', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n'
conn.put_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
expected_in_data = b' '.join((b'put',
to_bytes(shlex_quote('/path/to/in/file/with/unicode-fö〩')),
to_bytes(shlex_quote('/path/to/dest/file/with/unicode-fö〩')))) + b'\n'
conn.put_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
# test that a non-zero rc raises an error
conn._bare_run.return_value = (1, 'stdout', 'some errors')
self.assertRaises(AnsibleError, conn.put_file, '/path/to/bad/file', '/remote/path/to/file')
# test that a not-found path raises an error
mock_ospe.return_value = False
conn._bare_run.return_value = (0, 'stdout', '')
self.assertRaises(AnsibleFileNotFound, conn.put_file, '/path/to/bad/file', '/remote/path/to/file')
@patch('time.sleep')
def test_plugins_connection_ssh_fetch_file(self, mock_sleep):
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('ssh', pc, new_stdin)
conn._build_command = MagicMock()
conn._bare_run = MagicMock()
conn._load_name = 'ssh'
conn._build_command.return_value = 'some command to run'
conn._bare_run.return_value = (0, '', '')
conn.host = "some_host"
conn.set_option('reconnection_retries', 9)
conn.set_option('ssh_transfer_method', None) # unless set to None scp_if_ssh is ignored
# Test with SCP_IF_SSH set to smart
# Test when SFTP works
conn.set_option('scp_if_ssh', 'smart')
expected_in_data = b' '.join((b'get', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n'
conn.set_options({})
conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
# Test when SFTP doesn't work but SCP does
conn._bare_run.side_effect = [(1, 'stdout', 'some errors'), (0, '', '')]
conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
# test with SCP_IF_SSH enabled
conn._bare_run.side_effect = None
conn.set_option('ssh_transfer_method', None) # unless set to None scp_if_ssh is ignored
conn.set_option('scp_if_ssh', 'True')
conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
conn.fetch_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
# test with SCP_IF_SSH disabled
conn.set_option('scp_if_ssh', False)
expected_in_data = b' '.join((b'get', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n'
conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
expected_in_data = b' '.join((b'get',
to_bytes(shlex_quote('/path/to/in/file/with/unicode-fö〩')),
to_bytes(shlex_quote('/path/to/dest/file/with/unicode-fö〩')))) + b'\n'
conn.fetch_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
# test that a non-zero rc raises an error
conn._bare_run.return_value = (1, 'stdout', 'some errors')
self.assertRaises(AnsibleError, conn.fetch_file, '/path/to/bad/file', '/remote/path/to/file')
class MockSelector(object):
def __init__(self):
self.files_watched = 0
self.register = MagicMock(side_effect=self._register)
self.unregister = MagicMock(side_effect=self._unregister)
self.close = MagicMock()
self.get_map = MagicMock(side_effect=self._get_map)
self.select = MagicMock()
def _register(self, *args, **kwargs):
self.files_watched += 1
def _unregister(self, *args, **kwargs):
self.files_watched -= 1
def _get_map(self, *args, **kwargs):
return self.files_watched
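# Note (added for clarity): MockSelector stands in for the
# selectors.DefaultSelector used by the ssh connection's event loop; tests
# drive it by assigning batches of ready events to its select() mock, while
# the register/unregister counters let get_map() report when all watched
# files have been drained.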
@pytest.fixture
def mock_run_env(request, mocker):
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('ssh', pc, new_stdin)
conn.set_become_plugin(become_loader.get('sudo'))
conn._send_initial_data = MagicMock()
conn._examine_output = MagicMock()
conn._terminate_process = MagicMock()
conn._load_name = 'ssh'
conn.sshpass_pipe = [MagicMock(), MagicMock()]
request.cls.pc = pc
request.cls.conn = conn
mock_popen_res = MagicMock()
mock_popen_res.poll = MagicMock()
mock_popen_res.wait = MagicMock()
mock_popen_res.stdin = MagicMock()
mock_popen_res.stdin.fileno.return_value = 1000
mock_popen_res.stdout = MagicMock()
mock_popen_res.stdout.fileno.return_value = 1001
mock_popen_res.stderr = MagicMock()
mock_popen_res.stderr.fileno.return_value = 1002
mock_popen_res.returncode = 0
request.cls.mock_popen_res = mock_popen_res
mock_popen = mocker.patch('subprocess.Popen', return_value=mock_popen_res)
request.cls.mock_popen = mock_popen
request.cls.mock_selector = MockSelector()
mocker.patch('ansible.module_utils.compat.selectors.DefaultSelector', lambda: request.cls.mock_selector)
request.cls.mock_openpty = mocker.patch('pty.openpty')
mocker.patch('fcntl.fcntl')
mocker.patch('os.write')
mocker.patch('os.close')
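# Note (added for clarity): this fixture lets the connection's run loop execute
# without a real ssh process: subprocess.Popen returns mock_popen_res (fake
# stdin/stdout/stderr on fds 1000-1002), the compat selectors module hands back
# the shared MockSelector, and the pty/fcntl/os primitives are stubbed out.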
@pytest.mark.usefixtures('mock_run_env')
class TestSSHConnectionRun(object):
# FIXME:
# These tests are little more than a smoke test. They need to be enhanced
# to check that they're calling the relevant functions and to cover
# the code paths completely
def test_no_escalation(self):
self.mock_popen_res.stdout.read.side_effect = [b"my_stdout\n", b"second_line"]
self.mock_popen_res.stderr.read.side_effect = [b"my_stderr"]
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run("ssh", "this is input data")
assert return_code == 0
assert b_stdout == b'my_stdout\nsecond_line'
assert b_stderr == b'my_stderr'
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is True
assert self.conn._send_initial_data.call_count == 1
assert self.conn._send_initial_data.call_args[0][1] == 'this is input data'
def test_with_password(self):
# test with a password set to trigger the sshpass write
self.pc.password = '12345'
self.mock_popen_res.stdout.read.side_effect = [b"some data", b"", b""]
self.mock_popen_res.stderr.read.side_effect = [b""]
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run(["ssh", "is", "a", "cmd"], "this is more data")
assert return_code == 0
assert b_stdout == b'some data'
assert b_stderr == b''
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is True
assert self.conn._send_initial_data.call_count == 1
assert self.conn._send_initial_data.call_args[0][1] == 'this is more data'
def _password_with_prompt_examine_output(self, source, state, b_chunk, sudoable):
if state == 'awaiting_prompt':
self.conn._flags['become_prompt'] = True
elif state == 'awaiting_escalation':
self.conn._flags['become_success'] = True
return (b'', b'')
def test_password_with_prompt(self):
# test with password prompting enabled
self.pc.password = None
self.conn.become.prompt = b'Password:'
self.conn._examine_output.side_effect = self._password_with_prompt_examine_output
self.mock_popen_res.stdout.read.side_effect = [b"Password:", b"Success", b""]
self.mock_popen_res.stderr.read.side_effect = [b""]
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ),
(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run("ssh", "this is input data")
assert return_code == 0
assert b_stdout == b''
assert b_stderr == b''
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is True
assert self.conn._send_initial_data.call_count == 1
assert self.conn._send_initial_data.call_args[0][1] == 'this is input data'
def test_password_with_become(self):
# test with some become settings
self.pc.prompt = b'Password:'
self.conn.become.prompt = b'Password:'
self.pc.become = True
self.pc.success_key = 'BECOME-SUCCESS-abcdefg'
self.conn.become._id = 'abcdefg'
self.conn._examine_output.side_effect = self._password_with_prompt_examine_output
self.mock_popen_res.stdout.read.side_effect = [b"Password:", b"BECOME-SUCCESS-abcdefg", b"abc"]
self.mock_popen_res.stderr.read.side_effect = [b"123"]
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run("ssh", "this is input data")
self.mock_popen_res.stdin.flush.assert_called_once_with()
assert return_code == 0
assert b_stdout == b'abc'
assert b_stderr == b'123'
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is True
assert self.conn._send_initial_data.call_count == 1
assert self.conn._send_initial_data.call_args[0][1] == 'this is input data'
def test_password_without_data(self):
# simulate no data input but Popen using new pty's fails
self.mock_popen.return_value = None
self.mock_popen.side_effect = [OSError(), self.mock_popen_res]
# simulate no data input
self.mock_openpty.return_value = (98, 99)
self.mock_popen_res.stdout.read.side_effect = [b"some data", b"", b""]
self.mock_popen_res.stderr.read.side_effect = [b""]
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run("ssh", "")
assert return_code == 0
assert b_stdout == b'some data'
assert b_stderr == b''
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is False
@pytest.mark.usefixtures('mock_run_env')
class TestSSHConnectionRetries(object):
def test_incorrect_password(self, monkeypatch):
self.conn.set_option('host_key_checking', False)
self.conn.set_option('reconnection_retries', 5)
monkeypatch.setattr('time.sleep', lambda x: None)
self.mock_popen_res.stdout.read.side_effect = [b'']
self.mock_popen_res.stderr.read.side_effect = [b'Permission denied, please try again.\r\n']
type(self.mock_popen_res).returncode = PropertyMock(side_effect=[5] * 4)
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[],
]
self.mock_selector.get_map.side_effect = lambda: True
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = [b'sshpass', b'-d41', b'ssh', b'-C']
exception_info = pytest.raises(AnsibleAuthenticationFailure, self.conn.exec_command, 'sshpass', 'some data')
assert exception_info.value.message == ('Invalid/incorrect username/password. Skipping remaining 5 retries to prevent account lockout: '
'Permission denied, please try again.')
assert self.mock_popen.call_count == 1
def test_retry_then_success(self, monkeypatch):
self.conn.set_option('host_key_checking', False)
self.conn.set_option('reconnection_retries', 3)
monkeypatch.setattr('time.sleep', lambda x: None)
self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"]
self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"]
type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 3 + [0] * 4)
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[]
]
self.mock_selector.get_map.side_effect = lambda: True
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = 'ssh'
return_code, b_stdout, b_stderr = self.conn.exec_command('ssh', 'some data')
assert return_code == 0
assert b_stdout == b'my_stdout\nsecond_line'
assert b_stderr == b'my_stderr'
def test_multiple_failures(self, monkeypatch):
self.conn.set_option('host_key_checking', False)
self.conn.set_option('reconnection_retries', 9)
monkeypatch.setattr('time.sleep', lambda x: None)
self.mock_popen_res.stdout.read.side_effect = [b""] * 10
self.mock_popen_res.stderr.read.side_effect = [b""] * 10
type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 30)
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[],
] * 10
self.mock_selector.get_map.side_effect = lambda: True
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = 'ssh'
pytest.raises(AnsibleConnectionFailure, self.conn.exec_command, 'ssh', 'some data')
assert self.mock_popen.call_count == 10
def test_arbitrary_exceptions(self, monkeypatch):
self.conn.set_option('host_key_checking', False)
self.conn.set_option('reconnection_retries', 9)
monkeypatch.setattr('time.sleep', lambda x: None)
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = 'ssh'
self.mock_popen.side_effect = [Exception('bad')] * 10
pytest.raises(Exception, self.conn.exec_command, 'ssh', 'some data')
assert self.mock_popen.call_count == 10
def test_put_file_retries(self, monkeypatch):
self.conn.set_option('host_key_checking', False)
self.conn.set_option('reconnection_retries', 3)
monkeypatch.setattr('time.sleep', lambda x: None)
monkeypatch.setattr('ansible.plugins.connection.ssh.os.path.exists', lambda x: True)
self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"]
self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"]
type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 4 + [0] * 4)
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[]
]
self.mock_selector.get_map.side_effect = lambda: True
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = 'sftp'
return_code, b_stdout, b_stderr = self.conn.put_file('/path/to/in/file', '/path/to/dest/file')
assert return_code == 0
assert b_stdout == b"my_stdout\nsecond_line"
assert b_stderr == b"my_stderr"
assert self.mock_popen.call_count == 2
def test_fetch_file_retries(self, monkeypatch):
self.conn.set_option('host_key_checking', False)
self.conn.set_option('reconnection_retries', 3)
monkeypatch.setattr('time.sleep', lambda x: None)
monkeypatch.setattr('ansible.plugins.connection.ssh.os.path.exists', lambda x: True)
self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"]
self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"]
type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 4 + [0] * 4)
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[]
]
self.mock_selector.get_map.side_effect = lambda: True
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = 'sftp'
return_code, b_stdout, b_stderr = self.conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
assert return_code == 0
assert b_stdout == b"my_stdout\nsecond_line"
assert b_stderr == b"my_stderr"
assert self.mock_popen.call_count == 2
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75834
Extra Verbosity fails play
|
### Summary
When I use `become: yes` and set the logging verbosity to `-vvv`, it works perfectly fine. When the verbosity is increased to `-vvvv`, every run fails at the very beginning. Without `become: yes`, even `-vvvv` passes.
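My rough understanding of the moving parts (a sketch under my own assumptions, not the real plugin): ansible hands the SSH password to `sshpass` over a pipe (`sshpass -d10` in the EXEC line below reads it from file descriptor 10), and separately types the become password when it detects the `sudo` prompt, so anything that disturbs prompt detection can leave `sudo` reading the wrong bytes, which would match the three `Sorry, try again.` failures in the log:

```python
import os
import subprocess

# illustrative only: how an 'sshpass -d<fd>' invocation can be wired up
def run_with_sshpass(ssh_cmd, ssh_password):
    r, w = os.pipe()
    p = subprocess.Popen(['sshpass', '-d%d' % r] + ssh_cmd,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, pass_fds=(r,))
    os.close(r)                                 # child keeps its copy of the read end
    os.write(w, ssh_password.encode() + b'\n')  # feed the ssh login password
    os.close(w)
    return p.communicate()
```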
### Issue Type
Bug Report
### Component Name
Core
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.5]
config file = None
configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible
ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible
python version = 3.8.10 (default, Jun 2 2021, 10:49:15) [GCC 9.4.0]
jinja version = 3.0.1
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
ANSIBLE_PIPELINING(/workspace/ansible.cfg) = True
CALLBACKS_ENABLED(/workspace/ansible.cfg) = ['ansible.posix.profile_tasks', 'community.general.log_plays']
DEFAULT_FORKS(/workspace/ansible.cfg) = 10
DEFAULT_GATHERING(/workspace/ansible.cfg) = smart
DEFAULT_ROLES_PATH(env: ANSIBLE_ROLES_PATH) = ['/workspace/common-ansible/roles']
INTERPRETER_PYTHON(/workspace/ansible.cfg) = auto
```
### OS / Environment
Ubuntu 20.04.3 LTS
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
---
- hosts: gitlab_runners
become: true
tasks:
- debug:
msg=Success
```
### Expected Results
Play should run through with a lot of debug messages.
### Actual Results
Ouptut with `-vvvv`
```console
ansible-playbook [core 2.11.5]
config file = /workspace/ansible.cfg
configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible
ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible-playbook
python version = 3.8.10 (default, Jun 2 2021, 10:49:15) [GCC 9.4.0]
jinja version = 3.0.1
libyaml = True
Using /workspace/ansible.cfg as config file
setting up inventory plugins
host_list declined parsing /workspace/inventories/test/hosts.yml as it did not pass its verify_file() method
script declined parsing /workspace/inventories/test/hosts.yml as it did not pass its verify_file() method
Parsed /workspace/inventories/test/hosts.yml inventory source with yaml plugin
Loading callback plugin default of type stdout, v2.0 from /usr/local/lib/python3.8/dist-packages/ansible/plugins/callback/default.py
Loading collection ansible.posix from /usr/local/lib/python3.8/dist-packages/ansible_collections/ansible/posix
Loading collection community.general from /usr/local/lib/python3.8/dist-packages/ansible_collections/community/general
Skipping callback 'default', as we already have a stdout callback.
Skipping callback 'minimal', as we already have a stdout callback.
Skipping callback 'oneline', as we already have a stdout callback.
Loading callback plugin ansible.posix.profile_tasks of type aggregate, v2.0 from /usr/local/lib/python3.8/dist-packages/ansible_collections/ansible/posix/plugins/callback/profile_tasks.py
Loading callback plugin community.general.log_plays of type notification, v2.0 from /usr/local/lib/python3.8/dist-packages/ansible_collections/community/general/plugins/callback/log_plays.py
PLAYBOOK: test.yml *************************************************************
Positional arguments: test.yml
verbosity: 4
connection: smart
timeout: 10
become_method: sudo
tags: ('all',)
inventory: ('/workspace/inventories/test/hosts.yml',)
forks: 10
1 plays in test.yml
PLAY [gitlab_runners] **********************************************************
Trying secret ScriptVaultSecret(filename='/opt/print_vault_password.py') for vault_id=default
TASK [Gathering Facts] *********************************************************
task path: /workspace/test.yml:2
Thursday 30 September 2021 14:38:16 +0000 (0:00:00.060) 0:00:00.060 ****
Using module file /usr/local/lib/python3.8/dist-packages/ansible/modules/setup.py
Pipelining is enabled.
<10.1.1.201> ESTABLISH SSH CONNECTION FOR USER: manager
<10.1.1.201> SSH: EXEC sshpass -d10 ssh -vvv -C -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no -o 'User="manager"' -o ConnectTimeout=10 -o StrictHostKeyChecking=no -o ControlPath=/root/.ansible/cp/48372ad7a3 10.1.1.201 '/bin/sh -c '"'"'sudo -H -S -p "[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:" -u root /bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-muszsaqzcjtetkenkeiiqhqnnsdqlvmx ; python3'"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
<10.1.1.201> (1, b'', b'OpenSSH_8.2p1 Ubuntu-4ubuntu0.3, OpenSSL 1.1.1f 31 Mar 2020\r\ndebug1: Reading configuration data /etc/ssh/ssh_config\r\ndebug1: /etc/ssh/ssh_config line 19: include /etc/ssh/ssh_config.d/*.conf matched no files\r\ndebug1: /etc/ssh/ssh_config line 21: Applying options for *\r\ndebug2: resolve_canonicalize: hostname 10.1.1.201 is address\r\ndebug1: auto-mux: Trying existing master\r\ndebug1: Control socket "/root/.ansible/cp/48372ad7a3" does not exist\r\ndebug2: ssh_connect_direct\r\ndebug1: Connecting to 10.1.1.201 [10.1.1.201] port 22.\r\ndebug2: fd 3 setting O_NONBLOCK\r\ndebug1: fd 3 clearing O_NONBLOCK\r\ndebug1: Connection established.\r\ndebug3: timeout: 10000 ms remain after connect\r\ndebug1: SELinux support disabled\r\ndebug1: identity file /root/.ssh/id_rsa type -1\r\ndebug1: identity file /root/.ssh/id_rsa-cert type -1\r\ndebug1: identity file /root/.ssh/id_dsa type -1\r\ndebug1: identity file /root/.ssh/id_dsa-cert type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa-cert type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa_sk type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa_sk-cert type -1\r\ndebug1: identity file /root/.ssh/id_ed25519 type -1\r\ndebug1: identity file /root/.ssh/id_ed25519-cert type -1\r\ndebug1: identity file /root/.ssh/id_ed25519_sk type -1\r\ndebug1: identity file /root/.ssh/id_ed25519_sk-cert type -1\r\ndebug1: identity file /root/.ssh/id_xmss type -1\r\ndebug1: identity file /root/.ssh/id_xmss-cert type -1\r\ndebug1: Local version string SSH-2.0-OpenSSH_8.2p1 Ubuntu-4ubuntu0.3\r\ndebug1: Remote protocol version 2.0, remote software version OpenSSH_8.2p1 Ubuntu-4ubuntu0.3\r\ndebug1: match: OpenSSH_8.2p1 Ubuntu-4ubuntu0.3 pat OpenSSH* compat 0x04000000\r\ndebug2: fd 3 setting O_NONBLOCK\r\ndebug1: Authenticating to 10.1.1.201:22 as \'manager\'\r\ndebug3: send packet: type 20\r\ndebug1: SSH2_MSG_KEXINIT sent\r\ndebug3: receive packet: type 20\r\ndebug1: SSH2_MSG_KEXINIT received\r\ndebug2: local client KEXINIT proposal\r\ndebug2: KEX algorithms: curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group14-sha256,ext-info-c\r\ndebug2: host key algorithms: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,[email protected],ssh-ed25519,[email protected],rsa-sha2-512,rsa-sha2-256,ssh-rsa\r\ndebug2: ciphers ctos: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: ciphers stoc: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: MACs ctos: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: MACs stoc: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: compression ctos: [email protected],zlib,none\r\ndebug2: compression stoc: [email protected],zlib,none\r\ndebug2: languages ctos: \r\ndebug2: languages stoc: \r\ndebug2: first_kex_follows 0 \r\ndebug2: reserved 0 \r\ndebug2: peer server KEXINIT proposal\r\ndebug2: KEX algorithms: 
curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group14-sha256\r\ndebug2: host key algorithms: rsa-sha2-512,rsa-sha2-256,ssh-rsa,ecdsa-sha2-nistp256,ssh-ed25519\r\ndebug2: ciphers ctos: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: ciphers stoc: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: MACs ctos: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: MACs stoc: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: compression ctos: none,[email protected]\r\ndebug2: compression stoc: none,[email protected]\r\ndebug2: languages ctos: \r\ndebug2: languages stoc: \r\ndebug2: first_kex_follows 0 \r\ndebug2: reserved 0 \r\ndebug1: kex: algorithm: curve25519-sha256\r\ndebug1: kex: host key algorithm: ecdsa-sha2-nistp256\r\ndebug1: kex: server->client cipher: [email protected] MAC: <implicit> compression: [email protected]\r\ndebug1: kex: client->server cipher: [email protected] MAC: <implicit> compression: [email protected]\r\ndebug3: send packet: type 30\r\ndebug1: expecting SSH2_MSG_KEX_ECDH_REPLY\r\ndebug3: receive packet: type 31\r\ndebug1: Server host key: ecdsa-sha2-nistp256 SHA256:LylIOfTqWm+geE35hisc6xKHr/ZHTHTs/UQn80pYXOo\r\nWarning: Permanently added \'10.1.1.201\' (ECDSA) to the list of known hosts.\r\ndebug3: send packet: type 21\r\ndebug2: set_newkeys: mode 1\r\ndebug1: rekey out after 134217728 blocks\r\ndebug1: SSH2_MSG_NEWKEYS sent\r\ndebug1: expecting SSH2_MSG_NEWKEYS\r\ndebug3: receive packet: type 21\r\ndebug1: SSH2_MSG_NEWKEYS received\r\ndebug2: set_newkeys: mode 0\r\ndebug1: rekey in after 134217728 blocks\r\ndebug1: Will attempt key: /root/.ssh/id_rsa \r\ndebug1: Will attempt key: /root/.ssh/id_dsa \r\ndebug1: Will attempt key: /root/.ssh/id_ecdsa \r\ndebug1: Will attempt key: /root/.ssh/id_ecdsa_sk \r\ndebug1: Will attempt key: /root/.ssh/id_ed25519 \r\ndebug1: Will attempt key: /root/.ssh/id_ed25519_sk \r\ndebug1: Will attempt key: /root/.ssh/id_xmss \r\ndebug2: pubkey_prepare: done\r\ndebug3: send packet: type 5\r\ndebug3: receive packet: type 7\r\ndebug1: SSH2_MSG_EXT_INFO received\r\ndebug1: kex_input_ext_info: server-sig-algs=<ssh-ed25519,[email protected],ssh-rsa,rsa-sha2-256,rsa-sha2-512,ssh-dss,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,[email protected]>\r\ndebug3: receive packet: type 6\r\ndebug2: service_accept: ssh-userauth\r\ndebug1: SSH2_MSG_SERVICE_ACCEPT received\r\ndebug3: send packet: type 50\r\ndebug3: receive packet: type 51\r\ndebug1: Authentications that can continue: publickey,password\r\ndebug3: start over, passed a different list publickey,password\r\ndebug3: preferred gssapi-with-mic,publickey,keyboard-interactive,password\r\ndebug3: authmethod_lookup publickey\r\ndebug3: remaining preferred: keyboard-interactive,password\r\ndebug3: authmethod_is_enabled publickey\r\ndebug1: Next authentication method: publickey\r\ndebug1: Trying private key: /root/.ssh/id_rsa\r\ndebug3: no such identity: /root/.ssh/id_rsa: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_dsa\r\ndebug3: no such identity: /root/.ssh/id_dsa: No 
such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ecdsa\r\ndebug3: no such identity: /root/.ssh/id_ecdsa: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ecdsa_sk\r\ndebug3: no such identity: /root/.ssh/id_ecdsa_sk: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ed25519\r\ndebug3: no such identity: /root/.ssh/id_ed25519: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ed25519_sk\r\ndebug3: no such identity: /root/.ssh/id_ed25519_sk: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_xmss\r\ndebug3: no such identity: /root/.ssh/id_xmss: No such file or directory\r\ndebug2: we did not send a packet, disable method\r\ndebug3: authmethod_lookup password\r\ndebug3: remaining preferred: ,password\r\ndebug3: authmethod_is_enabled password\r\ndebug1: Next authentication method: password\r\ndebug3: send packet: type 50\r\ndebug2: we sent a password packet, wait for reply\r\ndebug3: receive packet: type 52\r\ndebug1: Enabling compression at level 6.\r\ndebug1: Authentication succeeded (password).\r\nAuthenticated to 10.1.1.201 ([10.1.1.201]:22).\r\ndebug1: setting up multiplex master socket\r\ndebug3: muxserver_listen: temporary control path /root/.ansible/cp/48372ad7a3.XW29Aj350ciuha3l\r\ndebug2: fd 4 setting O_NONBLOCK\r\ndebug3: fd 4 is O_NONBLOCK\r\ndebug3: fd 4 is O_NONBLOCK\r\ndebug1: channel 0: new [/root/.ansible/cp/48372ad7a3]\r\ndebug3: muxserver_listen: mux listener channel 0 fd 4\r\ndebug2: fd 3 setting TCP_NODELAY\r\ndebug3: ssh_packet_set_tos: set IP_TOS 0x08\r\ndebug1: control_persist_detach: backgrounding master process\r\ndebug2: control_persist_detach: background process is 16\r\ndebug2: fd 4 setting O_NONBLOCK\r\ndebug1: forking to background\r\ndebug1: Entering interactive session.\r\ndebug1: pledge: id\r\ndebug2: set_control_persist_exit_time: schedule exit in 60 seconds\r\ndebug1: multiplexing control connection\r\ndebug2: fd 5 setting O_NONBLOCK\r\ndebug3: fd 5 is O_NONBLOCK\r\ndebug1: channel 1: new [mux-control]\r\ndebug3: channel_post_mux_listener: new mux channel 1 fd 5\r\ndebug3: mux_master_read_cb: channel 1: hello sent\r\ndebug2: set_control_persist_exit_time: cancel scheduled exit\r\ndebug3: mux_master_read_cb: channel 1 packet type 0x00000001 len 4\r\ndebug2: mux_master_process_hello: channel 1 slave version 4\r\ndebug2: mux_client_hello_exchange: master version 4\r\ndebug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r\ndebug3: mux_client_request_session: entering\r\ndebug3: mux_client_request_alive: entering\r\ndebug3: mux_master_read_cb: channel 1 packet type 0x10000004 len 4\r\ndebug2: mux_master_process_alive_check: channel 1: alive check\r\ndebug3: mux_client_request_alive: done pid = 18\r\ndebug3: mux_client_request_session: session request sent\r\ndebug3: mux_master_read_cb: channel 1 packet type 0x10000002 len 256\r\ndebug3: mux_master_process_new_session: got fds stdin 6, stdout 7, stderr 8\r\ndebug2: fd 6 setting O_NONBLOCK\r\ndebug2: fd 7 setting O_NONBLOCK\r\ndebug2: fd 8 setting O_NONBLOCK\r\ndebug1: channel 2: new [client-session]\r\ndebug2: mux_master_process_new_session: channel_new: 2 linked to control channel 1\r\ndebug2: channel 2: send open\r\ndebug3: send packet: type 90\r\ndebug3: receive packet: type 80\r\ndebug1: client_input_global_request: rtype [email protected] want_reply 0\r\ndebug3: receive packet: type 91\r\ndebug2: channel_input_open_confirmation: channel 2: callback start\r\ndebug2: client_session2_setup: id 
2\r\ndebug1: Sending environment.\r\ndebug1: Sending env LANG = C.UTF-8\r\ndebug2: channel 2: request env confirm 0\r\ndebug3: send packet: type 98\r\ndebug1: Sending command: /bin/sh -c \'sudo -H -S -p "[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:" -u root /bin/sh -c \'"\'"\'echo BECOME-SUCCESS-muszsaqzcjtetkenkeiiqhqnnsdqlvmx ; python3\'"\'"\' && sleep 0\'\r\ndebug2: channel 2: request exec confirm 1\r\ndebug3: send packet: type 98\r\ndebug3: mux_session_confirm: sending success reply\r\ndebug2: channel_input_open_confirmation: channel 2: callback done\r\ndebug2: channel 2: open confirm rwindow 0 rmax 32768\r\ndebug2: channel 2: rcvd adjust 2097152\r\ndebug3: receive packet: type 99\r\ndebug2: channel_input_status_confirm: type 99 id 2\r\ndebug2: exec request accepted on channel 2\r\ndebug2: channel 2: read<=0 rfd 6 len 0\r\ndebug2: channel 2: read failed\r\ndebug2: channel 2: chan_shutdown_read (i0 o0 sock -1 wfd 6 efd 8 [write])\r\ndebug2: channel 2: input open -> drain\r\ndebug2: channel 2: ibuf empty\r\ndebug2: channel 2: send eof\r\ndebug3: send packet: type 96\r\ndebug2: channel 2: input drain -> closed\r\ndebug2: channel 2: rcvd adjust 65536\r\ndebug2: channel 2: rcvd ext data 66\r\n[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8\r\ndebug2: channel 2: rcvd ext data 18\r\nSorry, try again.\ndebug2: channel 2: written 18 to efd 8\r\ndebug2: channel 2: rcvd ext data 66\r\n[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8\r\ndebug2: channel 2: rcvd ext data 18\r\nSorry, try again.\ndebug2: channel 2: written 18 to efd 8\r\ndebug2: channel 2: rcvd ext data 66\r\n[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8\r\ndebug2: channel 2: rcvd ext data 36\r\nsudo: 3 incorrect password attempts\ndebug2: channel 2: written 36 to efd 8\r\ndebug3: receive packet: type 98\r\ndebug1: client_input_channel_req: channel 2 rtype [email protected] reply 0\r\ndebug2: channel 2: rcvd eow\r\ndebug3: receive packet: type 96\r\ndebug2: channel 2: rcvd eof\r\ndebug2: channel 2: output open -> drain\r\ndebug2: channel 2: obuf empty\r\ndebug2: channel 2: chan_shutdown_write (i3 o1 sock -1 wfd 7 efd 8 [write])\r\ndebug2: channel 2: output drain -> closed\r\ndebug2: channel 2: send close\r\ndebug3: send packet: type 97\r\ndebug3: channel 2: will not send data after close\r\ndebug3: receive packet: type 98\r\ndebug1: client_input_channel_req: channel 2 rtype exit-status reply 0\r\ndebug3: mux_exit_message: channel 2: exit message, exitval 1\r\ndebug3: receive packet: type 97\r\ndebug2: channel 2: rcvd close\r\ndebug3: channel 2: will not send data after close\r\ndebug2: channel 2: is dead\r\ndebug2: channel 2: gc: notify user\r\ndebug3: mux_master_session_cleanup_cb: entering for channel 2\r\ndebug2: channel 1: rcvd close\r\ndebug2: channel 1: output open -> drain\r\ndebug2: channel 1: chan_shutdown_read (i0 o1 sock 5 wfd 5 efd -1 [closed])\r\ndebug2: channel 1: input open -> closed\r\ndebug2: channel 2: gc: user detached\r\ndebug2: channel 2: is dead\r\ndebug2: channel 2: garbage collecting\r\ndebug1: channel 2: free: client-session, nchannels 3\r\ndebug3: channel 2: status: The following connections are open:\r\n #1 mux-control (t16 nr0 i3/0 o1/16 e[closed]/0 fd 5/5/-1 sock 5 cc -1)\r\n #2 client-session (t4 r0 i3/0 o3/0 e[write]/0 fd -1/-1/8 sock -1 cc -1)\r\n\r\ndebug2: channel 1: obuf empty\r\ndebug2: channel 1: 
chan_shutdown_write (i3 o1 sock 5 wfd 5 efd -1 [closed])\r\ndebug2: channel 1: output drain -> closed\r\ndebug2: channel 1: is dead (local)\r\ndebug2: channel 1: gc: notify user\r\ndebug3: mux_master_control_cleanup_cb: entering for channel 1\r\ndebug2: channel 1: gc: user detached\r\ndebug2: channel 1: is dead (local)\r\ndebug2: channel 1: garbage collecting\r\ndebug1: channel 1: free: mux-control, nchannels 2\r\ndebug3: channel 1: status: The following connections are open:\r\n #1 mux-control (t16 nr0 i3/0 o3/0 e[closed]/0 fd 5/5/-1 sock 5 cc -1)\r\n\r\ndebug2: set_control_persist_exit_time: schedule exit in 60 seconds\r\ndebug3: mux_client_read_packet: read header failed: Broken pipe\r\ndebug2: Received exit status from master 1\r\n')
<10.1.1.201> Failed to connect to the host via ssh: OpenSSH_8.2p1 Ubuntu-4ubuntu0.3, OpenSSL 1.1.1f 31 Mar 2020
debug1: Reading configuration data /etc/ssh/ssh_config
debug1: /etc/ssh/ssh_config line 19: include /etc/ssh/ssh_config.d/*.conf matched no files
debug1: /etc/ssh/ssh_config line 21: Applying options for *
debug2: resolve_canonicalize: hostname 10.1.1.201 is address
debug1: auto-mux: Trying existing master
debug1: Control socket "/root/.ansible/cp/48372ad7a3" does not exist
debug2: ssh_connect_direct
debug1: Connecting to 10.1.1.201 [10.1.1.201] port 22.
debug2: fd 3 setting O_NONBLOCK
debug1: fd 3 clearing O_NONBLOCK
debug1: Connection established.
debug3: timeout: 10000 ms remain after connect
debug1: SELinux support disabled
debug1: identity file /root/.ssh/id_rsa type -1
debug1: identity file /root/.ssh/id_rsa-cert type -1
debug1: identity file /root/.ssh/id_dsa type -1
debug1: identity file /root/.ssh/id_dsa-cert type -1
debug1: identity file /root/.ssh/id_ecdsa type -1
debug1: identity file /root/.ssh/id_ecdsa-cert type -1
debug1: identity file /root/.ssh/id_ecdsa_sk type -1
debug1: identity file /root/.ssh/id_ecdsa_sk-cert type -1
debug1: identity file /root/.ssh/id_ed25519 type -1
debug1: identity file /root/.ssh/id_ed25519-cert type -1
debug1: identity file /root/.ssh/id_ed25519_sk type -1
debug1: identity file /root/.ssh/id_ed25519_sk-cert type -1
debug1: identity file /root/.ssh/id_xmss type -1
debug1: identity file /root/.ssh/id_xmss-cert type -1
debug1: Local version string SSH-2.0-OpenSSH_8.2p1 Ubuntu-4ubuntu0.3
debug1: Remote protocol version 2.0, remote software version OpenSSH_8.2p1 Ubuntu-4ubuntu0.3
debug1: match: OpenSSH_8.2p1 Ubuntu-4ubuntu0.3 pat OpenSSH* compat 0x04000000
debug2: fd 3 setting O_NONBLOCK
debug1: Authenticating to 10.1.1.201:22 as 'manager'
debug3: send packet: type 20
debug1: SSH2_MSG_KEXINIT sent
debug3: receive packet: type 20
debug1: SSH2_MSG_KEXINIT received
debug2: local client KEXINIT proposal
debug2: KEX algorithms: curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group14-sha256,ext-info-c
debug2: host key algorithms: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,[email protected],ssh-ed25519,[email protected],rsa-sha2-512,rsa-sha2-256,ssh-rsa
debug2: ciphers ctos: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]
debug2: ciphers stoc: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]
debug2: MACs ctos: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1
debug2: MACs stoc: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1
debug2: compression ctos: [email protected],zlib,none
debug2: compression stoc: [email protected],zlib,none
debug2: languages ctos:
debug2: languages stoc:
debug2: first_kex_follows 0
debug2: reserved 0
debug2: peer server KEXINIT proposal
debug2: KEX algorithms: curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group14-sha256
debug2: host key algorithms: rsa-sha2-512,rsa-sha2-256,ssh-rsa,ecdsa-sha2-nistp256,ssh-ed25519
debug2: ciphers ctos: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]
debug2: ciphers stoc: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]
debug2: MACs ctos: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1
debug2: MACs stoc: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1
debug2: compression ctos: none,[email protected]
debug2: compression stoc: none,[email protected]
debug2: languages ctos:
debug2: languages stoc:
debug2: first_kex_follows 0
debug2: reserved 0
debug1: kex: algorithm: curve25519-sha256
debug1: kex: host key algorithm: ecdsa-sha2-nistp256
debug1: kex: server->client cipher: [email protected] MAC: <implicit> compression: [email protected]
debug1: kex: client->server cipher: [email protected] MAC: <implicit> compression: [email protected]
debug3: send packet: type 30
debug1: expecting SSH2_MSG_KEX_ECDH_REPLY
debug3: receive packet: type 31
debug1: Server host key: ecdsa-sha2-nistp256 SHA256:LylIOfTqWm+geE35hisc6xKHr/ZHTHTs/UQn80pYXOo
Warning: Permanently added '10.1.1.201' (ECDSA) to the list of known hosts.
debug3: send packet: type 21
debug2: set_newkeys: mode 1
debug1: rekey out after 134217728 blocks
debug1: SSH2_MSG_NEWKEYS sent
debug1: expecting SSH2_MSG_NEWKEYS
debug3: receive packet: type 21
debug1: SSH2_MSG_NEWKEYS received
debug2: set_newkeys: mode 0
debug1: rekey in after 134217728 blocks
debug1: Will attempt key: /root/.ssh/id_rsa
debug1: Will attempt key: /root/.ssh/id_dsa
debug1: Will attempt key: /root/.ssh/id_ecdsa
debug1: Will attempt key: /root/.ssh/id_ecdsa_sk
debug1: Will attempt key: /root/.ssh/id_ed25519
debug1: Will attempt key: /root/.ssh/id_ed25519_sk
debug1: Will attempt key: /root/.ssh/id_xmss
debug2: pubkey_prepare: done
debug3: send packet: type 5
debug3: receive packet: type 7
debug1: SSH2_MSG_EXT_INFO received
debug1: kex_input_ext_info: server-sig-algs=<ssh-ed25519,[email protected],ssh-rsa,rsa-sha2-256,rsa-sha2-512,ssh-dss,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,[email protected]>
debug3: receive packet: type 6
debug2: service_accept: ssh-userauth
debug1: SSH2_MSG_SERVICE_ACCEPT received
debug3: send packet: type 50
debug3: receive packet: type 51
debug1: Authentications that can continue: publickey,password
debug3: start over, passed a different list publickey,password
debug3: preferred gssapi-with-mic,publickey,keyboard-interactive,password
debug3: authmethod_lookup publickey
debug3: remaining preferred: keyboard-interactive,password
debug3: authmethod_is_enabled publickey
debug1: Next authentication method: publickey
debug1: Trying private key: /root/.ssh/id_rsa
debug3: no such identity: /root/.ssh/id_rsa: No such file or directory
debug1: Trying private key: /root/.ssh/id_dsa
debug3: no such identity: /root/.ssh/id_dsa: No such file or directory
debug1: Trying private key: /root/.ssh/id_ecdsa
debug3: no such identity: /root/.ssh/id_ecdsa: No such file or directory
debug1: Trying private key: /root/.ssh/id_ecdsa_sk
debug3: no such identity: /root/.ssh/id_ecdsa_sk: No such file or directory
debug1: Trying private key: /root/.ssh/id_ed25519
debug3: no such identity: /root/.ssh/id_ed25519: No such file or directory
debug1: Trying private key: /root/.ssh/id_ed25519_sk
debug3: no such identity: /root/.ssh/id_ed25519_sk: No such file or directory
debug1: Trying private key: /root/.ssh/id_xmss
debug3: no such identity: /root/.ssh/id_xmss: No such file or directory
debug2: we did not send a packet, disable method
debug3: authmethod_lookup password
debug3: remaining preferred: ,password
debug3: authmethod_is_enabled password
debug1: Next authentication method: password
debug3: send packet: type 50
debug2: we sent a password packet, wait for reply
debug3: receive packet: type 52
debug1: Enabling compression at level 6.
debug1: Authentication succeeded (password).
Authenticated to 10.1.1.201 ([10.1.1.201]:22).
debug1: setting up multiplex master socket
debug3: muxserver_listen: temporary control path /root/.ansible/cp/48372ad7a3.XW29Aj350ciuha3l
debug2: fd 4 setting O_NONBLOCK
debug3: fd 4 is O_NONBLOCK
debug3: fd 4 is O_NONBLOCK
debug1: channel 0: new [/root/.ansible/cp/48372ad7a3]
debug3: muxserver_listen: mux listener channel 0 fd 4
debug2: fd 3 setting TCP_NODELAY
debug3: ssh_packet_set_tos: set IP_TOS 0x08
debug1: control_persist_detach: backgrounding master process
debug2: control_persist_detach: background process is 16
debug2: fd 4 setting O_NONBLOCK
debug1: forking to background
debug1: Entering interactive session.
debug1: pledge: id
debug2: set_control_persist_exit_time: schedule exit in 60 seconds
debug1: multiplexing control connection
debug2: fd 5 setting O_NONBLOCK
debug3: fd 5 is O_NONBLOCK
debug1: channel 1: new [mux-control]
debug3: channel_post_mux_listener: new mux channel 1 fd 5
debug3: mux_master_read_cb: channel 1: hello sent
debug2: set_control_persist_exit_time: cancel scheduled exit
debug3: mux_master_read_cb: channel 1 packet type 0x00000001 len 4
debug2: mux_master_process_hello: channel 1 slave version 4
debug2: mux_client_hello_exchange: master version 4
debug3: mux_client_forwards: request forwardings: 0 local, 0 remote
debug3: mux_client_request_session: entering
debug3: mux_client_request_alive: entering
debug3: mux_master_read_cb: channel 1 packet type 0x10000004 len 4
debug2: mux_master_process_alive_check: channel 1: alive check
debug3: mux_client_request_alive: done pid = 18
debug3: mux_client_request_session: session request sent
debug3: mux_master_read_cb: channel 1 packet type 0x10000002 len 256
debug3: mux_master_process_new_session: got fds stdin 6, stdout 7, stderr 8
debug2: fd 6 setting O_NONBLOCK
debug2: fd 7 setting O_NONBLOCK
debug2: fd 8 setting O_NONBLOCK
debug1: channel 2: new [client-session]
debug2: mux_master_process_new_session: channel_new: 2 linked to control channel 1
debug2: channel 2: send open
debug3: send packet: type 90
debug3: receive packet: type 80
debug1: client_input_global_request: rtype [email protected] want_reply 0
debug3: receive packet: type 91
debug2: channel_input_open_confirmation: channel 2: callback start
debug2: client_session2_setup: id 2
debug1: Sending environment.
debug1: Sending env LANG = C.UTF-8
debug2: channel 2: request env confirm 0
debug3: send packet: type 98
debug1: Sending command: /bin/sh -c 'sudo -H -S -p "[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:" -u root /bin/sh -c '"'"'echo BECOME-SUCCESS-muszsaqzcjtetkenkeiiqhqnnsdqlvmx ; python3'"'"' && sleep 0'
debug2: channel 2: request exec confirm 1
debug3: send packet: type 98
debug3: mux_session_confirm: sending success reply
debug2: channel_input_open_confirmation: channel 2: callback done
debug2: channel 2: open confirm rwindow 0 rmax 32768
debug2: channel 2: rcvd adjust 2097152
debug3: receive packet: type 99
debug2: channel_input_status_confirm: type 99 id 2
debug2: exec request accepted on channel 2
debug2: channel 2: read<=0 rfd 6 len 0
debug2: channel 2: read failed
debug2: channel 2: chan_shutdown_read (i0 o0 sock -1 wfd 6 efd 8 [write])
debug2: channel 2: input open -> drain
debug2: channel 2: ibuf empty
debug2: channel 2: send eof
debug3: send packet: type 96
debug2: channel 2: input drain -> closed
debug2: channel 2: rcvd adjust 65536
debug2: channel 2: rcvd ext data 66
[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8
debug2: channel 2: rcvd ext data 18
Sorry, try again.
debug2: channel 2: written 18 to efd 8
debug2: channel 2: rcvd ext data 66
[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8
debug2: channel 2: rcvd ext data 18
Sorry, try again.
debug2: channel 2: written 18 to efd 8
debug2: channel 2: rcvd ext data 66
[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8
debug2: channel 2: rcvd ext data 36
sudo: 3 incorrect password attempts
debug2: channel 2: written 36 to efd 8
debug3: receive packet: type 98
debug1: client_input_channel_req: channel 2 rtype [email protected] reply 0
debug2: channel 2: rcvd eow
debug3: receive packet: type 96
debug2: channel 2: rcvd eof
debug2: channel 2: output open -> drain
debug2: channel 2: obuf empty
debug2: channel 2: chan_shutdown_write (i3 o1 sock -1 wfd 7 efd 8 [write])
debug2: channel 2: output drain -> closed
debug2: channel 2: send close
debug3: send packet: type 97
debug3: channel 2: will not send data after close
debug3: receive packet: type 98
debug1: client_input_channel_req: channel 2 rtype exit-status reply 0
debug3: mux_exit_message: channel 2: exit message, exitval 1
debug3: receive packet: type 97
debug2: channel 2: rcvd close
debug3: channel 2: will not send data after close
debug2: channel 2: is dead
debug2: channel 2: gc: notify user
debug3: mux_master_session_cleanup_cb: entering for channel 2
debug2: channel 1: rcvd close
debug2: channel 1: output open -> drain
debug2: channel 1: chan_shutdown_read (i0 o1 sock 5 wfd 5 efd -1 [closed])
debug2: channel 1: input open -> closed
debug2: channel 2: gc: user detached
debug2: channel 2: is dead
debug2: channel 2: garbage collecting
debug1: channel 2: free: client-session, nchannels 3
debug3: channel 2: status: The following connections are open:
#1 mux-control (t16 nr0 i3/0 o1/16 e[closed]/0 fd 5/5/-1 sock 5 cc -1)
#2 client-session (t4 r0 i3/0 o3/0 e[write]/0 fd -1/-1/8 sock -1 cc -1)
debug2: channel 1: obuf empty
debug2: channel 1: chan_shutdown_write (i3 o1 sock 5 wfd 5 efd -1 [closed])
debug2: channel 1: output drain -> closed
debug2: channel 1: is dead (local)
debug2: channel 1: gc: notify user
debug3: mux_master_control_cleanup_cb: entering for channel 1
debug2: channel 1: gc: user detached
debug2: channel 1: is dead (local)
debug2: channel 1: garbage collecting
debug1: channel 1: free: mux-control, nchannels 2
debug3: channel 1: status: The following connections are open:
#1 mux-control (t16 nr0 i3/0 o3/0 e[closed]/0 fd 5/5/-1 sock 5 cc -1)
debug2: set_control_persist_exit_time: schedule exit in 60 seconds
debug3: mux_client_read_packet: read header failed: Broken pipe
debug2: Received exit status from master 1
fatal: [test1]: FAILED! => {
"ansible_facts": {},
"changed": false,
"failed_modules": {
"ansible.legacy.setup": {
"failed": true,
"module_stderr": "OpenSSH_8.2p1 Ubuntu-4ubuntu0.3, OpenSSL 1.1.1f 31 Mar 2020\r\ndebug1: Reading configuration data /etc/ssh/ssh_config\r\ndebug1: /etc/ssh/ssh_config line 19: include /etc/ssh/ssh_config.d/*.conf matched no files\r\ndebug1: /etc/ssh/ssh_config line 21: Applying options for *\r\ndebug2: resolve_canonicalize: hostname 10.1.1.201 is address\r\ndebug1: auto-mux: Trying existing master\r\ndebug1: Control socket \"/root/.ansible/cp/48372ad7a3\" does not exist\r\ndebug2: ssh_connect_direct\r\ndebug1: Connecting to 10.1.1.201 [10.1.1.201] port 22.\r\ndebug2: fd 3 setting O_NONBLOCK\r\ndebug1: fd 3 clearing O_NONBLOCK\r\ndebug1: Connection established.\r\ndebug3: timeout: 10000 ms remain after connect\r\ndebug1: SELinux support disabled\r\ndebug1: identity file /root/.ssh/id_rsa type -1\r\ndebug1: identity file /root/.ssh/id_rsa-cert type -1\r\ndebug1: identity file /root/.ssh/id_dsa type -1\r\ndebug1: identity file /root/.ssh/id_dsa-cert type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa-cert type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa_sk type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa_sk-cert type -1\r\ndebug1: identity file /root/.ssh/id_ed25519 type -1\r\ndebug1: identity file /root/.ssh/id_ed25519-cert type -1\r\ndebug1: identity file /root/.ssh/id_ed25519_sk type -1\r\ndebug1: identity file /root/.ssh/id_ed25519_sk-cert type -1\r\ndebug1: identity file /root/.ssh/id_xmss type -1\r\ndebug1: identity file /root/.ssh/id_xmss-cert type -1\r\ndebug1: Local version string SSH-2.0-OpenSSH_8.2p1 Ubuntu-4ubuntu0.3\r\ndebug1: Remote protocol version 2.0, remote software version OpenSSH_8.2p1 Ubuntu-4ubuntu0.3\r\ndebug1: match: OpenSSH_8.2p1 Ubuntu-4ubuntu0.3 pat OpenSSH* compat 0x04000000\r\ndebug2: fd 3 setting O_NONBLOCK\r\ndebug1: Authenticating to 10.1.1.201:22 as 'manager'\r\ndebug3: send packet: type 20\r\ndebug1: SSH2_MSG_KEXINIT sent\r\ndebug3: receive packet: type 20\r\ndebug1: SSH2_MSG_KEXINIT received\r\ndebug2: local client KEXINIT proposal\r\ndebug2: KEX algorithms: curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group14-sha256,ext-info-c\r\ndebug2: host key algorithms: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,[email protected],ssh-ed25519,[email protected],rsa-sha2-512,rsa-sha2-256,ssh-rsa\r\ndebug2: ciphers ctos: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: ciphers stoc: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: MACs ctos: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: MACs stoc: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: compression ctos: [email protected],zlib,none\r\ndebug2: compression stoc: [email protected],zlib,none\r\ndebug2: languages ctos: \r\ndebug2: languages stoc: \r\ndebug2: first_kex_follows 0 \r\ndebug2: reserved 0 \r\ndebug2: peer server KEXINIT proposal\r\ndebug2: KEX algorithms: 
curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group14-sha256\r\ndebug2: host key algorithms: rsa-sha2-512,rsa-sha2-256,ssh-rsa,ecdsa-sha2-nistp256,ssh-ed25519\r\ndebug2: ciphers ctos: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: ciphers stoc: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: MACs ctos: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: MACs stoc: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: compression ctos: none,[email protected]\r\ndebug2: compression stoc: none,[email protected]\r\ndebug2: languages ctos: \r\ndebug2: languages stoc: \r\ndebug2: first_kex_follows 0 \r\ndebug2: reserved 0 \r\ndebug1: kex: algorithm: curve25519-sha256\r\ndebug1: kex: host key algorithm: ecdsa-sha2-nistp256\r\ndebug1: kex: server->client cipher: [email protected] MAC: <implicit> compression: [email protected]\r\ndebug1: kex: client->server cipher: [email protected] MAC: <implicit> compression: [email protected]\r\ndebug3: send packet: type 30\r\ndebug1: expecting SSH2_MSG_KEX_ECDH_REPLY\r\ndebug3: receive packet: type 31\r\ndebug1: Server host key: ecdsa-sha2-nistp256 SHA256:LylIOfTqWm+geE35hisc6xKHr/ZHTHTs/UQn80pYXOo\r\nWarning: Permanently added '10.1.1.201' (ECDSA) to the list of known hosts.\r\ndebug3: send packet: type 21\r\ndebug2: set_newkeys: mode 1\r\ndebug1: rekey out after 134217728 blocks\r\ndebug1: SSH2_MSG_NEWKEYS sent\r\ndebug1: expecting SSH2_MSG_NEWKEYS\r\ndebug3: receive packet: type 21\r\ndebug1: SSH2_MSG_NEWKEYS received\r\ndebug2: set_newkeys: mode 0\r\ndebug1: rekey in after 134217728 blocks\r\ndebug1: Will attempt key: /root/.ssh/id_rsa \r\ndebug1: Will attempt key: /root/.ssh/id_dsa \r\ndebug1: Will attempt key: /root/.ssh/id_ecdsa \r\ndebug1: Will attempt key: /root/.ssh/id_ecdsa_sk \r\ndebug1: Will attempt key: /root/.ssh/id_ed25519 \r\ndebug1: Will attempt key: /root/.ssh/id_ed25519_sk \r\ndebug1: Will attempt key: /root/.ssh/id_xmss \r\ndebug2: pubkey_prepare: done\r\ndebug3: send packet: type 5\r\ndebug3: receive packet: type 7\r\ndebug1: SSH2_MSG_EXT_INFO received\r\ndebug1: kex_input_ext_info: server-sig-algs=<ssh-ed25519,[email protected],ssh-rsa,rsa-sha2-256,rsa-sha2-512,ssh-dss,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,[email protected]>\r\ndebug3: receive packet: type 6\r\ndebug2: service_accept: ssh-userauth\r\ndebug1: SSH2_MSG_SERVICE_ACCEPT received\r\ndebug3: send packet: type 50\r\ndebug3: receive packet: type 51\r\ndebug1: Authentications that can continue: publickey,password\r\ndebug3: start over, passed a different list publickey,password\r\ndebug3: preferred gssapi-with-mic,publickey,keyboard-interactive,password\r\ndebug3: authmethod_lookup publickey\r\ndebug3: remaining preferred: keyboard-interactive,password\r\ndebug3: authmethod_is_enabled publickey\r\ndebug1: Next authentication method: publickey\r\ndebug1: Trying private key: /root/.ssh/id_rsa\r\ndebug3: no such identity: /root/.ssh/id_rsa: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_dsa\r\ndebug3: no such identity: /root/.ssh/id_dsa: No such 
file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ecdsa\r\ndebug3: no such identity: /root/.ssh/id_ecdsa: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ecdsa_sk\r\ndebug3: no such identity: /root/.ssh/id_ecdsa_sk: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ed25519\r\ndebug3: no such identity: /root/.ssh/id_ed25519: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ed25519_sk\r\ndebug3: no such identity: /root/.ssh/id_ed25519_sk: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_xmss\r\ndebug3: no such identity: /root/.ssh/id_xmss: No such file or directory\r\ndebug2: we did not send a packet, disable method\r\ndebug3: authmethod_lookup password\r\ndebug3: remaining preferred: ,password\r\ndebug3: authmethod_is_enabled password\r\ndebug1: Next authentication method: password\r\ndebug3: send packet: type 50\r\ndebug2: we sent a password packet, wait for reply\r\ndebug3: receive packet: type 52\r\ndebug1: Enabling compression at level 6.\r\ndebug1: Authentication succeeded (password).\r\nAuthenticated to 10.1.1.201 ([10.1.1.201]:22).\r\ndebug1: setting up multiplex master socket\r\ndebug3: muxserver_listen: temporary control path /root/.ansible/cp/48372ad7a3.XW29Aj350ciuha3l\r\ndebug2: fd 4 setting O_NONBLOCK\r\ndebug3: fd 4 is O_NONBLOCK\r\ndebug3: fd 4 is O_NONBLOCK\r\ndebug1: channel 0: new [/root/.ansible/cp/48372ad7a3]\r\ndebug3: muxserver_listen: mux listener channel 0 fd 4\r\ndebug2: fd 3 setting TCP_NODELAY\r\ndebug3: ssh_packet_set_tos: set IP_TOS 0x08\r\ndebug1: control_persist_detach: backgrounding master process\r\ndebug2: control_persist_detach: background process is 16\r\ndebug2: fd 4 setting O_NONBLOCK\r\ndebug1: forking to background\r\ndebug1: Entering interactive session.\r\ndebug1: pledge: id\r\ndebug2: set_control_persist_exit_time: schedule exit in 60 seconds\r\ndebug1: multiplexing control connection\r\ndebug2: fd 5 setting O_NONBLOCK\r\ndebug3: fd 5 is O_NONBLOCK\r\ndebug1: channel 1: new [mux-control]\r\ndebug3: channel_post_mux_listener: new mux channel 1 fd 5\r\ndebug3: mux_master_read_cb: channel 1: hello sent\r\ndebug2: set_control_persist_exit_time: cancel scheduled exit\r\ndebug3: mux_master_read_cb: channel 1 packet type 0x00000001 len 4\r\ndebug2: mux_master_process_hello: channel 1 slave version 4\r\ndebug2: mux_client_hello_exchange: master version 4\r\ndebug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r\ndebug3: mux_client_request_session: entering\r\ndebug3: mux_client_request_alive: entering\r\ndebug3: mux_master_read_cb: channel 1 packet type 0x10000004 len 4\r\ndebug2: mux_master_process_alive_check: channel 1: alive check\r\ndebug3: mux_client_request_alive: done pid = 18\r\ndebug3: mux_client_request_session: session request sent\r\ndebug3: mux_master_read_cb: channel 1 packet type 0x10000002 len 256\r\ndebug3: mux_master_process_new_session: got fds stdin 6, stdout 7, stderr 8\r\ndebug2: fd 6 setting O_NONBLOCK\r\ndebug2: fd 7 setting O_NONBLOCK\r\ndebug2: fd 8 setting O_NONBLOCK\r\ndebug1: channel 2: new [client-session]\r\ndebug2: mux_master_process_new_session: channel_new: 2 linked to control channel 1\r\ndebug2: channel 2: send open\r\ndebug3: send packet: type 90\r\ndebug3: receive packet: type 80\r\ndebug1: client_input_global_request: rtype [email protected] want_reply 0\r\ndebug3: receive packet: type 91\r\ndebug2: channel_input_open_confirmation: channel 2: callback start\r\ndebug2: client_session2_setup: id 
2\r\ndebug1: Sending environment.\r\ndebug1: Sending env LANG = C.UTF-8\r\ndebug2: channel 2: request env confirm 0\r\ndebug3: send packet: type 98\r\ndebug1: Sending command: /bin/sh -c 'sudo -H -S -p \"[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:\" -u root /bin/sh -c '\"'\"'echo BECOME-SUCCESS-muszsaqzcjtetkenkeiiqhqnnsdqlvmx ; python3'\"'\"' && sleep 0'\r\ndebug2: channel 2: request exec confirm 1\r\ndebug3: send packet: type 98\r\ndebug3: mux_session_confirm: sending success reply\r\ndebug2: channel_input_open_confirmation: channel 2: callback done\r\ndebug2: channel 2: open confirm rwindow 0 rmax 32768\r\ndebug2: channel 2: rcvd adjust 2097152\r\ndebug3: receive packet: type 99\r\ndebug2: channel_input_status_confirm: type 99 id 2\r\ndebug2: exec request accepted on channel 2\r\ndebug2: channel 2: read<=0 rfd 6 len 0\r\ndebug2: channel 2: read failed\r\ndebug2: channel 2: chan_shutdown_read (i0 o0 sock -1 wfd 6 efd 8 [write])\r\ndebug2: channel 2: input open -> drain\r\ndebug2: channel 2: ibuf empty\r\ndebug2: channel 2: send eof\r\ndebug3: send packet: type 96\r\ndebug2: channel 2: input drain -> closed\r\ndebug2: channel 2: rcvd adjust 65536\r\ndebug2: channel 2: rcvd ext data 66\r\n[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8\r\ndebug2: channel 2: rcvd ext data 18\r\nSorry, try again.\ndebug2: channel 2: written 18 to efd 8\r\ndebug2: channel 2: rcvd ext data 66\r\n[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8\r\ndebug2: channel 2: rcvd ext data 18\r\nSorry, try again.\ndebug2: channel 2: written 18 to efd 8\r\ndebug2: channel 2: rcvd ext data 66\r\n[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8\r\ndebug2: channel 2: rcvd ext data 36\r\nsudo: 3 incorrect password attempts\ndebug2: channel 2: written 36 to efd 8\r\ndebug3: receive packet: type 98\r\ndebug1: client_input_channel_req: channel 2 rtype [email protected] reply 0\r\ndebug2: channel 2: rcvd eow\r\ndebug3: receive packet: type 96\r\ndebug2: channel 2: rcvd eof\r\ndebug2: channel 2: output open -> drain\r\ndebug2: channel 2: obuf empty\r\ndebug2: channel 2: chan_shutdown_write (i3 o1 sock -1 wfd 7 efd 8 [write])\r\ndebug2: channel 2: output drain -> closed\r\ndebug2: channel 2: send close\r\ndebug3: send packet: type 97\r\ndebug3: channel 2: will not send data after close\r\ndebug3: receive packet: type 98\r\ndebug1: client_input_channel_req: channel 2 rtype exit-status reply 0\r\ndebug3: mux_exit_message: channel 2: exit message, exitval 1\r\ndebug3: receive packet: type 97\r\ndebug2: channel 2: rcvd close\r\ndebug3: channel 2: will not send data after close\r\ndebug2: channel 2: is dead\r\ndebug2: channel 2: gc: notify user\r\ndebug3: mux_master_session_cleanup_cb: entering for channel 2\r\ndebug2: channel 1: rcvd close\r\ndebug2: channel 1: output open -> drain\r\ndebug2: channel 1: chan_shutdown_read (i0 o1 sock 5 wfd 5 efd -1 [closed])\r\ndebug2: channel 1: input open -> closed\r\ndebug2: channel 2: gc: user detached\r\ndebug2: channel 2: is dead\r\ndebug2: channel 2: garbage collecting\r\ndebug1: channel 2: free: client-session, nchannels 3\r\ndebug3: channel 2: status: The following connections are open:\r\n #1 mux-control (t16 nr0 i3/0 o1/16 e[closed]/0 fd 5/5/-1 sock 5 cc -1)\r\n #2 client-session (t4 r0 i3/0 o3/0 e[write]/0 fd -1/-1/8 sock -1 cc -1)\r\n\r\ndebug2: channel 1: obuf empty\r\ndebug2: channel 1: 
chan_shutdown_write (i3 o1 sock 5 wfd 5 efd -1 [closed])\r\ndebug2: channel 1: output drain -> closed\r\ndebug2: channel 1: is dead (local)\r\ndebug2: channel 1: gc: notify user\r\ndebug3: mux_master_control_cleanup_cb: entering for channel 1\r\ndebug2: channel 1: gc: user detached\r\ndebug2: channel 1: is dead (local)\r\ndebug2: channel 1: garbage collecting\r\ndebug1: channel 1: free: mux-control, nchannels 2\r\ndebug3: channel 1: status: The following connections are open:\r\n #1 mux-control (t16 nr0 i3/0 o3/0 e[closed]/0 fd 5/5/-1 sock 5 cc -1)\r\n\r\ndebug2: set_control_persist_exit_time: schedule exit in 60 seconds\r\ndebug3: mux_client_read_packet: read header failed: Broken pipe\r\ndebug2: Received exit status from master 1\r\n",
"module_stdout": "",
"msg": "MODULE FAILURE\nSee stdout/stderr for the exact error",
"rc": 1
}
},
"msg": "The following modules failed to execute: ansible.legacy.setup\n"
}
PLAY RECAP *********************************************************************
test1 : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
Thursday 30 September 2021 14:38:33 +0000 (0:00:17.037) 0:00:17.098 ****
===============================================================================
Gathering Facts -------------------------------------------------------- 17.04s
/workspace/test.yml:2 ---------------------------------------------------------
```
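The failing `-vvvv` transcript above makes the mechanism visible: the remote sudo prompt and the local ssh client's own `debug2:`/`debug3:` chatter arrive interleaved in the same stderr stream, sometimes with no separator at all (`[sudo via ansible, key=...] password:debug2: channel 2: written 66 to efd 8`), and escalation then burns through three password attempts (`Sorry, try again.` ... `sudo: 3 incorrect password attempts`). One plausible reading, consistent with the changelog fragment attached to the fix (`ssh_debug_noparse.yml`), is that at this verbosity the connection plugin ends up treating ssh's own debug lines as remote output, which derails become prompt handling. The sketch below is illustrative only, not Ansible's actual implementation; the prompt key `k` is a stand-in for the real `key=...` token:

```python
import re

# Hypothetical sketch: at -vvvv the ssh client's debug output lands in the
# same buffer as the remote sudo prompt, sometimes on the same line.
buf = (b"debug2: channel 2: rcvd ext data 66\r\n"
       b"[sudo via ansible, key=k] password:"
       b"debug2: channel 2: written 66 to efd 8\r\n")

prompt = b"[sudo via ansible, key=k] password:"

# A detector that waits for the prompt at the end of the buffer never fires:
print(buf.rstrip().endswith(prompt))    # False

# Stripping the client-side debug chatter first recovers the prompt:
clean = re.sub(rb"debug[1-3]:[^\r\n]*\r?\n?", b"", buf)
print(clean.rstrip().endswith(prompt))  # True
```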
Output with `-vvv`
```console
ansible-playbook [core 2.11.5]
config file = /workspace/ansible.cfg
configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible
ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible-playbook
python version = 3.8.10 (default, Jun 2 2021, 10:49:15) [GCC 9.4.0]
jinja version = 3.0.1
libyaml = True
Using /workspace/ansible.cfg as config file
host_list declined parsing /workspace/inventories/test/hosts.yml as it did not pass its verify_file() method
script declined parsing /workspace/inventories/test/hosts.yml as it did not pass its verify_file() method
Parsed /workspace/inventories/test/hosts.yml inventory source with yaml plugin
Skipping callback 'default', as we already have a stdout callback.
Skipping callback 'minimal', as we already have a stdout callback.
Skipping callback 'oneline', as we already have a stdout callback.
PLAYBOOK: test.yml *************************************************************
1 plays in test.yml
PLAY [gitlab_runners] **********************************************************
TASK [Gathering Facts] *********************************************************
task path: /workspace/test.yml:2
Thursday 30 September 2021 14:39:02 +0000 (0:00:00.044) 0:00:00.044 ****
Using module file /usr/local/lib/python3.8/dist-packages/ansible/modules/setup.py
Pipelining is enabled.
<10.1.1.201> ESTABLISH SSH CONNECTION FOR USER: manager
<10.1.1.201> SSH: EXEC sshpass -d10 ssh -C -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no -o 'User="manager"' -o ConnectTimeout=10 -o StrictHostKeyChecking=no -o ControlPath=/root/.ansible/cp/48372ad7a3 10.1.1.201 '/bin/sh -c '"'"'sudo -H -S -p "[sudo via ansible, key=emwqvsoxlkrjzgibgvumxuqwnsbstafh] password:" -u root /bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-emwqvsoxlkrjzgibgvumxuqwnsbstafh ; python3'"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
<10.1.1.201> (0, b'\n{"ansible_facts": {"ansible_system": "Linux", "ansible_kernel": "5.4.0-86-generic", "ansible_kernel_version": "#97-Ubuntu SMP Fri Sep 17 19:19:40 UTC 2021", "ansible_machine": "x86_64", "ansible_python_version": "3.8.10", "ansible_fqdn": "test1", "ansible_hostname": "test1", "ansible_nodename": "test1", "ansible_domain": "", "ansible_userspace_bits": "64", "ansible_architecture": "x86_64", "ansible_userspace_architecture": "x86_64", "ansible_machine_id": "c47dd6e16b184c00a0226434c8d1b693", "ansible_user_id": "root", "ansible_user_uid": 0, "ansible_user_gid": 0, "ansible_user_gecos": "root", "ansible_user_dir": "/root", "ansible_user_shell": "/bin/bash", "ansible_real_user_id": 0, "ansible_effective_user_id": 0, "ansible_real_group_id": 0, "ansible_effective_group_id": 0, "ansible_python": {"version": {"major": 3, "minor": 8, "micro": 10, "releaselevel": "final", "serial": 0}, "version_info": [3, 8, 10, "final", 0], "executable": "/usr/bin/python3", "has_sslcontext": true, "type": "cpython"}, "ansible_fibre_channel_wwn": [], "ansible_is_chroot": false, "ansible_dns": {"nameservers": ["127.0.0.53"], "options": {"edns0": true, "trust-ad": true}, "search": ["frauscher.intern"]}, "ansible_processor": ["0", "GenuineIntel", "Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.60GHz", "1", "GenuineIntel", "Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.60GHz"], "ansible_processor_count": 1, "ansible_processor_cores": 2, "ansible_processor_threads_per_core": 1, "ansible_processor_vcpus": 2, "ansible_processor_nproc": 2, "ansible_memtotal_mb": 1987, "ansible_memfree_mb": 1459, "ansible_swaptotal_mb": 0, "ansible_swapfree_mb": 0, "ansible_memory_mb": {"real": {"total": 1987, "used": 528, "free": 1459}, "nocache": {"free": 1790, "used": 197}, "swap": {"total": 0, "free": 0, "used": 0, "cached": 0}}, "ansible_bios_date": "12/01/2006", "ansible_bios_vendor": "innotek GmbH", "ansible_bios_version": "VirtualBox", "ansible_board_asset_tag": "NA", "ansible_board_name": "VirtualBox", "ansible_board_serial": "0", "ansible_board_vendor": "Oracle Corporation", "ansible_board_version": "1.2", "ansible_chassis_asset_tag": "NA", "ansible_chassis_serial": "NA", "ansible_chassis_vendor": "Oracle Corporation", "ansible_chassis_version": "NA", "ansible_form_factor": "Other", "ansible_product_name": "VirtualBox", "ansible_product_serial": "0", "ansible_product_uuid": "e0eea82f-53ae-3447-bc29-87fb43fa6de8", "ansible_product_version": "1.2", "ansible_system_vendor": "innotek GmbH", "ansible_devices": {"loop1": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "4096", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "137728", "sectorsize": "512", "size": "67.25 MB", "host": "", "holders": []}, "sdb": {"virtual": 1, "links": {"ids": [], "uuids": ["2021-09-23-22-20-32-00"], "labels": ["cidata"], "masters": []}, "vendor": "VBOX", "model": "HARDDISK", "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "0", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "20480", "sectorsize": "512", "size": "10.00 MB", "host": "SCSI storage controller: Broadcom / LSI 53c1030 PCI-X Fusion-MPT Dual Ultra320 SCSI", "holders": []}, "loop6": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", 
"support_discard": "0", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "0", "sectorsize": "512", "size": "0.00 Bytes", "host": "", "holders": []}, "loop4": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "0", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "0", "sectorsize": "512", "size": "0.00 Bytes", "host": "", "holders": []}, "loop2": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "4096", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "66168", "sectorsize": "512", "size": "32.31 MB", "host": "", "holders": []}, "loop0": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "4096", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "126504", "sectorsize": "512", "size": "61.77 MB", "host": "", "holders": []}, "loop7": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "0", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "0", "sectorsize": "512", "size": "0.00 Bytes", "host": "", "holders": []}, "sda": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": "VBOX", "model": "HARDDISK", "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "0", "partitions": {"sda1": {"links": {"ids": [], "uuids": ["94aefc52-4039-4b4a-8b2a-65124174d9b9"], "labels": ["cloudimg-rootfs"], "masters": []}, "start": "2048", "sectors": "83883999", "sectorsize": 512, "size": "40.00 GB", "uuid": "94aefc52-4039-4b4a-8b2a-65124174d9b9", "holders": []}}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "83886080", "sectorsize": "512", "size": "40.00 GB", "host": "SCSI storage controller: Broadcom / LSI 53c1030 PCI-X Fusion-MPT Dual Ultra320 SCSI", "holders": []}, "loop5": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "0", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "0", "sectorsize": "512", "size": "0.00 Bytes", "host": "", "holders": []}, "loop3": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "4096", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "0", "sectorsize": "512", "size": "0.00 Bytes", "host": "", "holders": []}}, "ansible_device_links": {"ids": {}, "uuids": {"sda1": ["94aefc52-4039-4b4a-8b2a-65124174d9b9"], "sdb": ["2021-09-23-22-20-32-00"]}, "labels": {"sda1": ["cloudimg-rootfs"], "sdb": ["cidata"]}, "masters": {}}, "ansible_uptime_seconds": 319, "ansible_lvm": {"lvs": {}, "vgs": {}, "pvs": {}}, "ansible_mounts": [{"mount": "/", "device": "/dev/sda1", "fstype": "ext4", "options": "rw,relatime", "size_total": 41567858688, 
"size_available": 40172617728, "block_size": 4096, "block_total": 10148403, "block_available": 9807768, "block_used": 340635, "inode_total": 5120000, "inode_available": 5047941, "inode_used": 72059, "uuid": "94aefc52-4039-4b4a-8b2a-65124174d9b9"}, {"mount": "/snap/core20/1081", "device": "/dev/loop0", "fstype": "squashfs", "options": "ro,nodev,relatime", "size_total": 64880640, "size_available": 0, "block_size": 131072, "block_total": 495, "block_available": 0, "block_used": 495, "inode_total": 11720, "inode_available": 0, "inode_used": 11720, "uuid": "N/A"}, {"mount": "/snap/lxd/21545", "device": "/dev/loop1", "fstype": "squashfs", "options": "ro,nodev,relatime", "size_total": 70516736, "size_available": 0, "block_size": 131072, "block_total": 538, "block_available": 0, "block_used": 538, "inode_total": 796, "inode_available": 0, "inode_used": 796, "uuid": "N/A"}, {"mount": "/snap/snapd/13170", "device": "/dev/loop2", "fstype": "squashfs", "options": "ro,nodev,relatime", "size_total": 33947648, "size_available": 0, "block_size": 131072, "block_total": 259, "block_available": 0, "block_used": 259, "inode_total": 474, "inode_available": 0, "inode_used": 474, "uuid": "N/A"}], "ansible_system_capabilities_enforced": "False", "ansible_system_capabilities": [], "ansible_ssh_host_key_dsa_public": "AAAAB3NzaC1kc3MAAACBANhTZfF+TbSqS3c3V0mYbiGOOHdB+wzCQlw73P/orjE3GaQQ3u3x4FA5RZcrfaBXKxgCatn4oTHbYN1vPhRECVvasTgZtyNP418lXaEsd4fJ7SLc0ju4BKaq8Pr5ppNSbcOEKru96ENnbHHrDi/XC7fKSpHijjGDhK86+HYs724/AAAAFQDGLonRqAKyuVczMoJLceaRShaCAwAAAIArdhTSkCW1LKS6II0NSonSQWeFb6f0X2kGVmfyvy9wFDgwdx5BkMEdaJBZrdIrROHQF+IPcK3jNfEAeJjfJ07YSFPiTQGzV6Y3clHchT/y6z0t0yHn+eP3w3OEjAPwRB4NAOdGsQdpg7Qs/sgSLjJ47EbLpf6P+ImfH7L8NMNB+wAAAIBIIpiFAEOa670brc4+aIJhObs3vuCKkmr9lOHBBqtMM/a8CQprG8PrbacZ3TRUBzBMTmWyFVSsniEpAhy1V7BHt7sgUBWu27A4fb1O94Vb1r7cXZ8BSKnT8TRmzdnxMdYPAiYhgQYF3uRfPB97AjBlDOaZGIug7quxtoM8+lg6YQ==", "ansible_ssh_host_key_dsa_public_keytype": "ssh-dss", "ansible_ssh_host_key_rsa_public": "AAAAB3NzaC1yc2EAAAADAQABAAABgQDYA8nuLROQD6tE5QAyK/7j+ewLYa+xOLFcDelQ2ack4gh8+2cM11oEvuz6QcL4xVKIOz308M8WK9x6dKGF2ujCaSEp2/DWXLbpCX/Rn6pB6OOeolq6Cop/OTE8M9tutCzyG1mr9UZZTkPks/j3bsIPPHjHNf/554BH4beG7zTlzARp4MABqkmTQ1XGGzZLxzRGKUUJaLo+6mxxf+3mcKKpzHTHiaGf20s57WXIbokJ1PJ4iwh/zdT93ebG7VwxxPcxDs+UwHcTJMnhXHpOwlXaBxUyawzOWGceWnpmlXs/aycqZ/5LbZf1BQp13WnDiLfuD1tzXCHq0Dl05cnMuOJhwSSb+13UZr1uxk7y8EUiYKKbOoamj7+8wvfi++3mGwHW1kokm1wWsu79EBxaVu7r0nTxs57JuCHFIUvDt63ajI0G4+U9Qhu7Kxi9NmNyoyv1JSPVXyv2dSZFQZwh6phzyyztirQSR4wbTzlEbmhKHESMUkhE0mgdMWajxzLUIXM=", "ansible_ssh_host_key_rsa_public_keytype": "ssh-rsa", "ansible_ssh_host_key_ecdsa_public": "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOrElzJrdktqNP+A5xDLWb3KLPYOjn9q/0v0+/15rBMNgTvkQXyVhbV/b8OxEpjo7evCk4XNM6Zg17N5AxdDX7k=", "ansible_ssh_host_key_ecdsa_public_keytype": "ecdsa-sha2-nistp256", "ansible_ssh_host_key_ed25519_public": "AAAAC3NzaC1lZDI1NTE5AAAAICHStriNsa0rVUQpwU7t4cmg+DNXSPVS+csFo7nbnlHx", "ansible_ssh_host_key_ed25519_public_keytype": "ssh-ed25519", "ansible_iscsi_iqn": "", "ansible_fips": false, "ansible_distribution": "Ubuntu", "ansible_distribution_release": "focal", "ansible_distribution_version": "20.04", "ansible_distribution_major_version": "20", "ansible_distribution_file_path": "/etc/os-release", "ansible_distribution_file_variety": "Debian", "ansible_distribution_file_parsed": true, "ansible_os_family": "Debian", "ansible_virtualization_role": "guest", "ansible_virtualization_type": "virtualbox", "ansible_virtualization_tech_guest": ["virtualbox"], 
"ansible_virtualization_tech_host": [], "ansible_lsb": {"id": "Ubuntu", "description": "Ubuntu 20.04.3 LTS", "release": "20.04", "codename": "focal", "major_release": "20"}, "ansible_cmdline": {"BOOT_IMAGE": "/boot/vmlinuz-5.4.0-86-generic", "root": "UUID=94aefc52-4039-4b4a-8b2a-65124174d9b9", "ro": true, "console": "ttyS0"}, "ansible_proc_cmdline": {"BOOT_IMAGE": "/boot/vmlinuz-5.4.0-86-generic", "root": "UUID=94aefc52-4039-4b4a-8b2a-65124174d9b9", "ro": true, "console": ["tty1", "ttyS0"]}, "ansible_hostnqn": "", "ansible_local": {}, "ansible_env": {"SUDO_GID": "1900", "MAIL": "/var/mail/root", "USER": "root", "HOME": "/root", "SUDO_UID": "1900", "LOGNAME": "root", "TERM": "unknown", "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin", "LANG": "C.UTF-8", "SUDO_COMMAND": "/bin/sh -c echo BECOME-SUCCESS-emwqvsoxlkrjzgibgvumxuqwnsbstafh ; python3", "SHELL": "/bin/bash", "SUDO_USER": "manager", "PWD": "/home/manager"}, "ansible_interfaces": ["enp0s8", "enp0s3", "lo"], "ansible_enp0s8": {"device": "enp0s8", "macaddress": "08:00:27:67:8d:69", "mtu": 1500, "active": true, "module": "e1000", "type": "ether", "pciid": "0000:00:08.0", "speed": 1000, "promisc": false, "ipv4": {"address": "10.1.1.201", "broadcast": "10.1.1.255", "netmask": "255.255.255.0", "network": "10.1.1.0"}, "ipv6": [{"address": "fe80::a00:27ff:fe67:8d69", "prefix": "64", "scope": "link"}], "features": {"rx_checksumming": "off", "tx_checksumming": "on", "tx_checksum_ipv4": "off [fixed]", "tx_checksum_ip_generic": "on", "tx_checksum_ipv6": "off [fixed]", "tx_checksum_fcoe_crc": "off [fixed]", "tx_checksum_sctp": "off [fixed]", "scatter_gather": "on", "tx_scatter_gather": "on", "tx_scatter_gather_fraglist": "off [fixed]", "tcp_segmentation_offload": "on", "tx_tcp_segmentation": "on", "tx_tcp_ecn_segmentation": "off [fixed]", "tx_tcp_mangleid_segmentation": "off", "tx_tcp6_segmentation": "off [fixed]", "generic_segmentation_offload": "on", "generic_receive_offload": "on", "large_receive_offload": "off [fixed]", "rx_vlan_offload": "on", "tx_vlan_offload": "on [fixed]", "ntuple_filters": "off [fixed]", "receive_hashing": "off [fixed]", "highdma": "off [fixed]", "rx_vlan_filter": "on [fixed]", "vlan_challenged": "off [fixed]", "tx_lockless": "off [fixed]", "netns_local": "off [fixed]", "tx_gso_robust": "off [fixed]", "tx_fcoe_segmentation": "off [fixed]", "tx_gre_segmentation": "off [fixed]", "tx_gre_csum_segmentation": "off [fixed]", "tx_ipxip4_segmentation": "off [fixed]", "tx_ipxip6_segmentation": "off [fixed]", "tx_udp_tnl_segmentation": "off [fixed]", "tx_udp_tnl_csum_segmentation": "off [fixed]", "tx_gso_partial": "off [fixed]", "tx_sctp_segmentation": "off [fixed]", "tx_esp_segmentation": "off [fixed]", "tx_udp_segmentation": "off [fixed]", "fcoe_mtu": "off [fixed]", "tx_nocache_copy": "off", "loopback": "off [fixed]", "rx_fcs": "off", "rx_all": "off", "tx_vlan_stag_hw_insert": "off [fixed]", "rx_vlan_stag_hw_parse": "off [fixed]", "rx_vlan_stag_filter": "off [fixed]", "l2_fwd_offload": "off [fixed]", "hw_tc_offload": "off [fixed]", "esp_hw_offload": "off [fixed]", "esp_tx_csum_hw_offload": "off [fixed]", "rx_udp_tunnel_port_offload": "off [fixed]", "tls_hw_tx_offload": "off [fixed]", "tls_hw_rx_offload": "off [fixed]", "rx_gro_hw": "off [fixed]", "tls_hw_record": "off [fixed]"}, "timestamping": ["tx_software", "rx_software", "software"], "hw_timestamp_filters": []}, "ansible_enp0s3": {"device": "enp0s3", "macaddress": "02:de:69:7f:c7:01", "mtu": 1500, "active": true, "module": "e1000", "type": 
"ether", "pciid": "0000:00:03.0", "speed": 1000, "promisc": false, "ipv4": {"address": "10.0.2.15", "broadcast": "10.0.2.255", "netmask": "255.255.255.0", "network": "10.0.2.0"}, "ipv6": [{"address": "fe80::de:69ff:fe7f:c701", "prefix": "64", "scope": "link"}], "features": {"rx_checksumming": "off", "tx_checksumming": "on", "tx_checksum_ipv4": "off [fixed]", "tx_checksum_ip_generic": "on", "tx_checksum_ipv6": "off [fixed]", "tx_checksum_fcoe_crc": "off [fixed]", "tx_checksum_sctp": "off [fixed]", "scatter_gather": "on", "tx_scatter_gather": "on", "tx_scatter_gather_fraglist": "off [fixed]", "tcp_segmentation_offload": "on", "tx_tcp_segmentation": "on", "tx_tcp_ecn_segmentation": "off [fixed]", "tx_tcp_mangleid_segmentation": "off", "tx_tcp6_segmentation": "off [fixed]", "generic_segmentation_offload": "on", "generic_receive_offload": "on", "large_receive_offload": "off [fixed]", "rx_vlan_offload": "on", "tx_vlan_offload": "on [fixed]", "ntuple_filters": "off [fixed]", "receive_hashing": "off [fixed]", "highdma": "off [fixed]", "rx_vlan_filter": "on [fixed]", "vlan_challenged": "off [fixed]", "tx_lockless": "off [fixed]", "netns_local": "off [fixed]", "tx_gso_robust": "off [fixed]", "tx_fcoe_segmentation": "off [fixed]", "tx_gre_segmentation": "off [fixed]", "tx_gre_csum_segmentation": "off [fixed]", "tx_ipxip4_segmentation": "off [fixed]", "tx_ipxip6_segmentation": "off [fixed]", "tx_udp_tnl_segmentation": "off [fixed]", "tx_udp_tnl_csum_segmentation": "off [fixed]", "tx_gso_partial": "off [fixed]", "tx_sctp_segmentation": "off [fixed]", "tx_esp_segmentation": "off [fixed]", "tx_udp_segmentation": "off [fixed]", "fcoe_mtu": "off [fixed]", "tx_nocache_copy": "off", "loopback": "off [fixed]", "rx_fcs": "off", "rx_all": "off", "tx_vlan_stag_hw_insert": "off [fixed]", "rx_vlan_stag_hw_parse": "off [fixed]", "rx_vlan_stag_filter": "off [fixed]", "l2_fwd_offload": "off [fixed]", "hw_tc_offload": "off [fixed]", "esp_hw_offload": "off [fixed]", "esp_tx_csum_hw_offload": "off [fixed]", "rx_udp_tunnel_port_offload": "off [fixed]", "tls_hw_tx_offload": "off [fixed]", "tls_hw_rx_offload": "off [fixed]", "rx_gro_hw": "off [fixed]", "tls_hw_record": "off [fixed]"}, "timestamping": ["tx_software", "rx_software", "software"], "hw_timestamp_filters": []}, "ansible_lo": {"device": "lo", "mtu": 65536, "active": true, "type": "loopback", "promisc": false, "ipv4": {"address": "127.0.0.1", "broadcast": "", "netmask": "255.0.0.0", "network": "127.0.0.0"}, "ipv6": [{"address": "::1", "prefix": "128", "scope": "host"}], "features": {"rx_checksumming": "on [fixed]", "tx_checksumming": "on", "tx_checksum_ipv4": "off [fixed]", "tx_checksum_ip_generic": "on [fixed]", "tx_checksum_ipv6": "off [fixed]", "tx_checksum_fcoe_crc": "off [fixed]", "tx_checksum_sctp": "on [fixed]", "scatter_gather": "on", "tx_scatter_gather": "on [fixed]", "tx_scatter_gather_fraglist": "on [fixed]", "tcp_segmentation_offload": "on", "tx_tcp_segmentation": "on", "tx_tcp_ecn_segmentation": "on", "tx_tcp_mangleid_segmentation": "on", "tx_tcp6_segmentation": "on", "generic_segmentation_offload": "on", "generic_receive_offload": "on", "large_receive_offload": "off [fixed]", "rx_vlan_offload": "off [fixed]", "tx_vlan_offload": "off [fixed]", "ntuple_filters": "off [fixed]", "receive_hashing": "off [fixed]", "highdma": "on [fixed]", "rx_vlan_filter": "off [fixed]", "vlan_challenged": "on [fixed]", "tx_lockless": "on [fixed]", "netns_local": "on [fixed]", "tx_gso_robust": "off [fixed]", "tx_fcoe_segmentation": "off [fixed]", "tx_gre_segmentation": 
"off [fixed]", "tx_gre_csum_segmentation": "off [fixed]", "tx_ipxip4_segmentation": "off [fixed]", "tx_ipxip6_segmentation": "off [fixed]", "tx_udp_tnl_segmentation": "off [fixed]", "tx_udp_tnl_csum_segmentation": "off [fixed]", "tx_gso_partial": "off [fixed]", "tx_sctp_segmentation": "on", "tx_esp_segmentation": "off [fixed]", "tx_udp_segmentation": "off [fixed]", "fcoe_mtu": "off [fixed]", "tx_nocache_copy": "off [fixed]", "loopback": "on [fixed]", "rx_fcs": "off [fixed]", "rx_all": "off [fixed]", "tx_vlan_stag_hw_insert": "off [fixed]", "rx_vlan_stag_hw_parse": "off [fixed]", "rx_vlan_stag_filter": "off [fixed]", "l2_fwd_offload": "off [fixed]", "hw_tc_offload": "off [fixed]", "esp_hw_offload": "off [fixed]", "esp_tx_csum_hw_offload": "off [fixed]", "rx_udp_tunnel_port_offload": "off [fixed]", "tls_hw_tx_offload": "off [fixed]", "tls_hw_rx_offload": "off [fixed]", "rx_gro_hw": "off [fixed]", "tls_hw_record": "off [fixed]"}, "timestamping": ["tx_software", "rx_software", "software"], "hw_timestamp_filters": []}, "ansible_default_ipv4": {"gateway": "10.0.2.2", "interface": "enp0s3", "address": "10.0.2.15", "broadcast": "10.0.2.255", "netmask": "255.255.255.0", "network": "10.0.2.0", "macaddress": "02:de:69:7f:c7:01", "mtu": 1500, "type": "ether", "alias": "enp0s3"}, "ansible_default_ipv6": {}, "ansible_all_ipv4_addresses": ["10.1.1.201", "10.0.2.15"], "ansible_all_ipv6_addresses": ["fe80::a00:27ff:fe67:8d69", "fe80::de:69ff:fe7f:c701"], "ansible_selinux_python_present": true, "ansible_selinux": {"status": "disabled"}, "ansible_apparmor": {"status": "enabled"}, "ansible_date_time": {"year": "2021", "month": "09", "weekday": "Thursday", "weekday_number": "4", "weeknumber": "39", "day": "30", "hour": "14", "minute": "39", "second": "09", "epoch": "1633012749", "date": "2021-09-30", "time": "14:39:09", "iso8601_micro": "2021-09-30T14:39:09.397030Z", "iso8601": "2021-09-30T14:39:09Z", "iso8601_basic": "20210930T143909397030", "iso8601_basic_short": "20210930T143909", "tz": "UTC", "tz_dst": "UTC", "tz_offset": "+0000"}, "ansible_service_mgr": "systemd", "ansible_pkg_mgr": "apt", "gather_subset": ["all"], "module_setup": true}, "invocation": {"module_args": {"gather_subset": ["all"], "gather_timeout": 10, "filter": [], "fact_path": "/etc/ansible/facts.d"}}}\n', b"Warning: Permanently added '10.1.1.201' (ECDSA) to the list of known hosts.\r\n")
ok: [test1]
META: ran handlers
TASK [debug] *******************************************************************
task path: /workspace/test.yml:5
Thursday 30 September 2021 14:39:19 +0000 (0:00:17.449) 0:00:17.494 ****
ok: [test1] => {
"msg": "Success"
}
META: ran handlers
META: ran handlers
PLAY RECAP *********************************************************************
test1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
Thursday 30 September 2021 14:39:19 +0000 (0:00:00.040) 0:00:17.534 ****
===============================================================================
Gathering Facts -------------------------------------------------------- 17.45s
/workspace/test.yml:2 ---------------------------------------------------------
debug ------------------------------------------------------------------- 0.04s
/workspace/test.yml:5 ---------------------------------------------------------
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75834
|
https://github.com/ansible/ansible/pull/76732
|
9142be2f6cabbe6597c9254c5bb9186d17036d55
|
0ff80a15ba40c2aff3b96c1152f19c97a92d3c97
| 2021-09-30T08:41:28Z |
python
| 2022-01-13T21:28:09Z |
changelogs/fragments/ssh_debug_noparse.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,834 |
Extra Verbosity fails play
|
### Summary
When I use `become: yes` and set the logging verbosity to `-vvv`, everything works perfectly fine. When the verbosity is increased to `-vvvv`, every run fails at the very beginning. Without `become: yes`, even `-vvvv` passes.
### Issue Type
Bug Report
### Component Name
Core
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.5]
config file = None
configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible
ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible
python version = 3.8.10 (default, Jun 2 2021, 10:49:15) [GCC 9.4.0]
jinja version = 3.0.1
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
ANSIBLE_PIPELINING(/workspace/ansible.cfg) = True
CALLBACKS_ENABLED(/workspace/ansible.cfg) = ['ansible.posix.profile_tasks', 'community.general.log_plays']
DEFAULT_FORKS(/workspace/ansible.cfg) = 10
DEFAULT_GATHERING(/workspace/ansible.cfg) = smart
DEFAULT_ROLES_PATH(env: ANSIBLE_ROLES_PATH) = ['/workspace/common-ansible/roles']
INTERPRETER_PYTHON(/workspace/ansible.cfg) = auto
```
### OS / Environment
Ubuntu 20.04.3 LTS
### Steps to Reproduce
```yaml
---
- hosts: gitlab_runners
  become: true
  tasks:
    - debug:
        msg: Success
```
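For reference, a minimal reproduction sketch. The playbook name `test.yml` and the inventory path are taken from the verbose output below; the exact command line is an assumption, since the original invocation is not shown:

```console
# Assumed invocation; test.yml is the playbook above, inventory path as seen in the logs
ansible-playbook -i /workspace/inventories/test/hosts.yml -vvv test.yml    # passes
ansible-playbook -i /workspace/inventories/test/hosts.yml -vvvv test.yml   # fails during fact gathering
```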
### Expected Results
The play should run to completion, just with a lot of debug messages.
### Actual Results
Output with `-vvvv`
```console
ansible-playbook [core 2.11.5]
config file = /workspace/ansible.cfg
configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible
ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible-playbook
python version = 3.8.10 (default, Jun 2 2021, 10:49:15) [GCC 9.4.0]
jinja version = 3.0.1
libyaml = True
Using /workspace/ansible.cfg as config file
setting up inventory plugins
host_list declined parsing /workspace/inventories/test/hosts.yml as it did not pass its verify_file() method
script declined parsing /workspace/inventories/test/hosts.yml as it did not pass its verify_file() method
Parsed /workspace/inventories/test/hosts.yml inventory source with yaml plugin
Loading callback plugin default of type stdout, v2.0 from /usr/local/lib/python3.8/dist-packages/ansible/plugins/callback/default.py
Loading collection ansible.posix from /usr/local/lib/python3.8/dist-packages/ansible_collections/ansible/posix
Loading collection community.general from /usr/local/lib/python3.8/dist-packages/ansible_collections/community/general
Skipping callback 'default', as we already have a stdout callback.
Skipping callback 'minimal', as we already have a stdout callback.
Skipping callback 'oneline', as we already have a stdout callback.
Loading callback plugin ansible.posix.profile_tasks of type aggregate, v2.0 from /usr/local/lib/python3.8/dist-packages/ansible_collections/ansible/posix/plugins/callback/profile_tasks.py
Loading callback plugin community.general.log_plays of type notification, v2.0 from /usr/local/lib/python3.8/dist-packages/ansible_collections/community/general/plugins/callback/log_plays.py
PLAYBOOK: test.yml *************************************************************
Positional arguments: test.yml
verbosity: 4
connection: smart
timeout: 10
become_method: sudo
tags: ('all',)
inventory: ('/workspace/inventories/test/hosts.yml',)
forks: 10
1 plays in test.yml
PLAY [gitlab_runners] **********************************************************
Trying secret ScriptVaultSecret(filename='/opt/print_vault_password.py') for vault_id=default
TASK [Gathering Facts] *********************************************************
task path: /workspace/test.yml:2
Thursday 30 September 2021 14:38:16 +0000 (0:00:00.060) 0:00:00.060 ****
Using module file /usr/local/lib/python3.8/dist-packages/ansible/modules/setup.py
Pipelining is enabled.
<10.1.1.201> ESTABLISH SSH CONNECTION FOR USER: manager
<10.1.1.201> SSH: EXEC sshpass -d10 ssh -vvv -C -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no -o 'User="manager"' -o ConnectTimeout=10 -o StrictHostKeyChecking=no -o ControlPath=/root/.ansible/cp/48372ad7a3 10.1.1.201 '/bin/sh -c '"'"'sudo -H -S -p "[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:" -u root /bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-muszsaqzcjtetkenkeiiqhqnnsdqlvmx ; python3'"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
<10.1.1.201> (1, b'', b'OpenSSH_8.2p1 Ubuntu-4ubuntu0.3, OpenSSL 1.1.1f 31 Mar 2020\r\ndebug1: Reading configuration data /etc/ssh/ssh_config\r\ndebug1: /etc/ssh/ssh_config line 19: include /etc/ssh/ssh_config.d/*.conf matched no files\r\ndebug1: /etc/ssh/ssh_config line 21: Applying options for *\r\ndebug2: resolve_canonicalize: hostname 10.1.1.201 is address\r\ndebug1: auto-mux: Trying existing master\r\ndebug1: Control socket "/root/.ansible/cp/48372ad7a3" does not exist\r\ndebug2: ssh_connect_direct\r\ndebug1: Connecting to 10.1.1.201 [10.1.1.201] port 22.\r\ndebug2: fd 3 setting O_NONBLOCK\r\ndebug1: fd 3 clearing O_NONBLOCK\r\ndebug1: Connection established.\r\ndebug3: timeout: 10000 ms remain after connect\r\ndebug1: SELinux support disabled\r\ndebug1: identity file /root/.ssh/id_rsa type -1\r\ndebug1: identity file /root/.ssh/id_rsa-cert type -1\r\ndebug1: identity file /root/.ssh/id_dsa type -1\r\ndebug1: identity file /root/.ssh/id_dsa-cert type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa-cert type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa_sk type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa_sk-cert type -1\r\ndebug1: identity file /root/.ssh/id_ed25519 type -1\r\ndebug1: identity file /root/.ssh/id_ed25519-cert type -1\r\ndebug1: identity file /root/.ssh/id_ed25519_sk type -1\r\ndebug1: identity file /root/.ssh/id_ed25519_sk-cert type -1\r\ndebug1: identity file /root/.ssh/id_xmss type -1\r\ndebug1: identity file /root/.ssh/id_xmss-cert type -1\r\ndebug1: Local version string SSH-2.0-OpenSSH_8.2p1 Ubuntu-4ubuntu0.3\r\ndebug1: Remote protocol version 2.0, remote software version OpenSSH_8.2p1 Ubuntu-4ubuntu0.3\r\ndebug1: match: OpenSSH_8.2p1 Ubuntu-4ubuntu0.3 pat OpenSSH* compat 0x04000000\r\ndebug2: fd 3 setting O_NONBLOCK\r\ndebug1: Authenticating to 10.1.1.201:22 as \'manager\'\r\ndebug3: send packet: type 20\r\ndebug1: SSH2_MSG_KEXINIT sent\r\ndebug3: receive packet: type 20\r\ndebug1: SSH2_MSG_KEXINIT received\r\ndebug2: local client KEXINIT proposal\r\ndebug2: KEX algorithms: curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group14-sha256,ext-info-c\r\ndebug2: host key algorithms: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,[email protected],ssh-ed25519,[email protected],rsa-sha2-512,rsa-sha2-256,ssh-rsa\r\ndebug2: ciphers ctos: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: ciphers stoc: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: MACs ctos: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: MACs stoc: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: compression ctos: [email protected],zlib,none\r\ndebug2: compression stoc: [email protected],zlib,none\r\ndebug2: languages ctos: \r\ndebug2: languages stoc: \r\ndebug2: first_kex_follows 0 \r\ndebug2: reserved 0 \r\ndebug2: peer server KEXINIT proposal\r\ndebug2: KEX algorithms: 
curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group14-sha256\r\ndebug2: host key algorithms: rsa-sha2-512,rsa-sha2-256,ssh-rsa,ecdsa-sha2-nistp256,ssh-ed25519\r\ndebug2: ciphers ctos: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: ciphers stoc: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: MACs ctos: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: MACs stoc: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: compression ctos: none,[email protected]\r\ndebug2: compression stoc: none,[email protected]\r\ndebug2: languages ctos: \r\ndebug2: languages stoc: \r\ndebug2: first_kex_follows 0 \r\ndebug2: reserved 0 \r\ndebug1: kex: algorithm: curve25519-sha256\r\ndebug1: kex: host key algorithm: ecdsa-sha2-nistp256\r\ndebug1: kex: server->client cipher: [email protected] MAC: <implicit> compression: [email protected]\r\ndebug1: kex: client->server cipher: [email protected] MAC: <implicit> compression: [email protected]\r\ndebug3: send packet: type 30\r\ndebug1: expecting SSH2_MSG_KEX_ECDH_REPLY\r\ndebug3: receive packet: type 31\r\ndebug1: Server host key: ecdsa-sha2-nistp256 SHA256:LylIOfTqWm+geE35hisc6xKHr/ZHTHTs/UQn80pYXOo\r\nWarning: Permanently added \'10.1.1.201\' (ECDSA) to the list of known hosts.\r\ndebug3: send packet: type 21\r\ndebug2: set_newkeys: mode 1\r\ndebug1: rekey out after 134217728 blocks\r\ndebug1: SSH2_MSG_NEWKEYS sent\r\ndebug1: expecting SSH2_MSG_NEWKEYS\r\ndebug3: receive packet: type 21\r\ndebug1: SSH2_MSG_NEWKEYS received\r\ndebug2: set_newkeys: mode 0\r\ndebug1: rekey in after 134217728 blocks\r\ndebug1: Will attempt key: /root/.ssh/id_rsa \r\ndebug1: Will attempt key: /root/.ssh/id_dsa \r\ndebug1: Will attempt key: /root/.ssh/id_ecdsa \r\ndebug1: Will attempt key: /root/.ssh/id_ecdsa_sk \r\ndebug1: Will attempt key: /root/.ssh/id_ed25519 \r\ndebug1: Will attempt key: /root/.ssh/id_ed25519_sk \r\ndebug1: Will attempt key: /root/.ssh/id_xmss \r\ndebug2: pubkey_prepare: done\r\ndebug3: send packet: type 5\r\ndebug3: receive packet: type 7\r\ndebug1: SSH2_MSG_EXT_INFO received\r\ndebug1: kex_input_ext_info: server-sig-algs=<ssh-ed25519,[email protected],ssh-rsa,rsa-sha2-256,rsa-sha2-512,ssh-dss,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,[email protected]>\r\ndebug3: receive packet: type 6\r\ndebug2: service_accept: ssh-userauth\r\ndebug1: SSH2_MSG_SERVICE_ACCEPT received\r\ndebug3: send packet: type 50\r\ndebug3: receive packet: type 51\r\ndebug1: Authentications that can continue: publickey,password\r\ndebug3: start over, passed a different list publickey,password\r\ndebug3: preferred gssapi-with-mic,publickey,keyboard-interactive,password\r\ndebug3: authmethod_lookup publickey\r\ndebug3: remaining preferred: keyboard-interactive,password\r\ndebug3: authmethod_is_enabled publickey\r\ndebug1: Next authentication method: publickey\r\ndebug1: Trying private key: /root/.ssh/id_rsa\r\ndebug3: no such identity: /root/.ssh/id_rsa: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_dsa\r\ndebug3: no such identity: /root/.ssh/id_dsa: No 
such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ecdsa\r\ndebug3: no such identity: /root/.ssh/id_ecdsa: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ecdsa_sk\r\ndebug3: no such identity: /root/.ssh/id_ecdsa_sk: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ed25519\r\ndebug3: no such identity: /root/.ssh/id_ed25519: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ed25519_sk\r\ndebug3: no such identity: /root/.ssh/id_ed25519_sk: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_xmss\r\ndebug3: no such identity: /root/.ssh/id_xmss: No such file or directory\r\ndebug2: we did not send a packet, disable method\r\ndebug3: authmethod_lookup password\r\ndebug3: remaining preferred: ,password\r\ndebug3: authmethod_is_enabled password\r\ndebug1: Next authentication method: password\r\ndebug3: send packet: type 50\r\ndebug2: we sent a password packet, wait for reply\r\ndebug3: receive packet: type 52\r\ndebug1: Enabling compression at level 6.\r\ndebug1: Authentication succeeded (password).\r\nAuthenticated to 10.1.1.201 ([10.1.1.201]:22).\r\ndebug1: setting up multiplex master socket\r\ndebug3: muxserver_listen: temporary control path /root/.ansible/cp/48372ad7a3.XW29Aj350ciuha3l\r\ndebug2: fd 4 setting O_NONBLOCK\r\ndebug3: fd 4 is O_NONBLOCK\r\ndebug3: fd 4 is O_NONBLOCK\r\ndebug1: channel 0: new [/root/.ansible/cp/48372ad7a3]\r\ndebug3: muxserver_listen: mux listener channel 0 fd 4\r\ndebug2: fd 3 setting TCP_NODELAY\r\ndebug3: ssh_packet_set_tos: set IP_TOS 0x08\r\ndebug1: control_persist_detach: backgrounding master process\r\ndebug2: control_persist_detach: background process is 16\r\ndebug2: fd 4 setting O_NONBLOCK\r\ndebug1: forking to background\r\ndebug1: Entering interactive session.\r\ndebug1: pledge: id\r\ndebug2: set_control_persist_exit_time: schedule exit in 60 seconds\r\ndebug1: multiplexing control connection\r\ndebug2: fd 5 setting O_NONBLOCK\r\ndebug3: fd 5 is O_NONBLOCK\r\ndebug1: channel 1: new [mux-control]\r\ndebug3: channel_post_mux_listener: new mux channel 1 fd 5\r\ndebug3: mux_master_read_cb: channel 1: hello sent\r\ndebug2: set_control_persist_exit_time: cancel scheduled exit\r\ndebug3: mux_master_read_cb: channel 1 packet type 0x00000001 len 4\r\ndebug2: mux_master_process_hello: channel 1 slave version 4\r\ndebug2: mux_client_hello_exchange: master version 4\r\ndebug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r\ndebug3: mux_client_request_session: entering\r\ndebug3: mux_client_request_alive: entering\r\ndebug3: mux_master_read_cb: channel 1 packet type 0x10000004 len 4\r\ndebug2: mux_master_process_alive_check: channel 1: alive check\r\ndebug3: mux_client_request_alive: done pid = 18\r\ndebug3: mux_client_request_session: session request sent\r\ndebug3: mux_master_read_cb: channel 1 packet type 0x10000002 len 256\r\ndebug3: mux_master_process_new_session: got fds stdin 6, stdout 7, stderr 8\r\ndebug2: fd 6 setting O_NONBLOCK\r\ndebug2: fd 7 setting O_NONBLOCK\r\ndebug2: fd 8 setting O_NONBLOCK\r\ndebug1: channel 2: new [client-session]\r\ndebug2: mux_master_process_new_session: channel_new: 2 linked to control channel 1\r\ndebug2: channel 2: send open\r\ndebug3: send packet: type 90\r\ndebug3: receive packet: type 80\r\ndebug1: client_input_global_request: rtype [email protected] want_reply 0\r\ndebug3: receive packet: type 91\r\ndebug2: channel_input_open_confirmation: channel 2: callback start\r\ndebug2: client_session2_setup: id 
2\r\ndebug1: Sending environment.\r\ndebug1: Sending env LANG = C.UTF-8\r\ndebug2: channel 2: request env confirm 0\r\ndebug3: send packet: type 98\r\ndebug1: Sending command: /bin/sh -c \'sudo -H -S -p "[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:" -u root /bin/sh -c \'"\'"\'echo BECOME-SUCCESS-muszsaqzcjtetkenkeiiqhqnnsdqlvmx ; python3\'"\'"\' && sleep 0\'\r\ndebug2: channel 2: request exec confirm 1\r\ndebug3: send packet: type 98\r\ndebug3: mux_session_confirm: sending success reply\r\ndebug2: channel_input_open_confirmation: channel 2: callback done\r\ndebug2: channel 2: open confirm rwindow 0 rmax 32768\r\ndebug2: channel 2: rcvd adjust 2097152\r\ndebug3: receive packet: type 99\r\ndebug2: channel_input_status_confirm: type 99 id 2\r\ndebug2: exec request accepted on channel 2\r\ndebug2: channel 2: read<=0 rfd 6 len 0\r\ndebug2: channel 2: read failed\r\ndebug2: channel 2: chan_shutdown_read (i0 o0 sock -1 wfd 6 efd 8 [write])\r\ndebug2: channel 2: input open -> drain\r\ndebug2: channel 2: ibuf empty\r\ndebug2: channel 2: send eof\r\ndebug3: send packet: type 96\r\ndebug2: channel 2: input drain -> closed\r\ndebug2: channel 2: rcvd adjust 65536\r\ndebug2: channel 2: rcvd ext data 66\r\n[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8\r\ndebug2: channel 2: rcvd ext data 18\r\nSorry, try again.\ndebug2: channel 2: written 18 to efd 8\r\ndebug2: channel 2: rcvd ext data 66\r\n[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8\r\ndebug2: channel 2: rcvd ext data 18\r\nSorry, try again.\ndebug2: channel 2: written 18 to efd 8\r\ndebug2: channel 2: rcvd ext data 66\r\n[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8\r\ndebug2: channel 2: rcvd ext data 36\r\nsudo: 3 incorrect password attempts\ndebug2: channel 2: written 36 to efd 8\r\ndebug3: receive packet: type 98\r\ndebug1: client_input_channel_req: channel 2 rtype [email protected] reply 0\r\ndebug2: channel 2: rcvd eow\r\ndebug3: receive packet: type 96\r\ndebug2: channel 2: rcvd eof\r\ndebug2: channel 2: output open -> drain\r\ndebug2: channel 2: obuf empty\r\ndebug2: channel 2: chan_shutdown_write (i3 o1 sock -1 wfd 7 efd 8 [write])\r\ndebug2: channel 2: output drain -> closed\r\ndebug2: channel 2: send close\r\ndebug3: send packet: type 97\r\ndebug3: channel 2: will not send data after close\r\ndebug3: receive packet: type 98\r\ndebug1: client_input_channel_req: channel 2 rtype exit-status reply 0\r\ndebug3: mux_exit_message: channel 2: exit message, exitval 1\r\ndebug3: receive packet: type 97\r\ndebug2: channel 2: rcvd close\r\ndebug3: channel 2: will not send data after close\r\ndebug2: channel 2: is dead\r\ndebug2: channel 2: gc: notify user\r\ndebug3: mux_master_session_cleanup_cb: entering for channel 2\r\ndebug2: channel 1: rcvd close\r\ndebug2: channel 1: output open -> drain\r\ndebug2: channel 1: chan_shutdown_read (i0 o1 sock 5 wfd 5 efd -1 [closed])\r\ndebug2: channel 1: input open -> closed\r\ndebug2: channel 2: gc: user detached\r\ndebug2: channel 2: is dead\r\ndebug2: channel 2: garbage collecting\r\ndebug1: channel 2: free: client-session, nchannels 3\r\ndebug3: channel 2: status: The following connections are open:\r\n #1 mux-control (t16 nr0 i3/0 o1/16 e[closed]/0 fd 5/5/-1 sock 5 cc -1)\r\n #2 client-session (t4 r0 i3/0 o3/0 e[write]/0 fd -1/-1/8 sock -1 cc -1)\r\n\r\ndebug2: channel 1: obuf empty\r\ndebug2: channel 1: 
chan_shutdown_write (i3 o1 sock 5 wfd 5 efd -1 [closed])\r\ndebug2: channel 1: output drain -> closed\r\ndebug2: channel 1: is dead (local)\r\ndebug2: channel 1: gc: notify user\r\ndebug3: mux_master_control_cleanup_cb: entering for channel 1\r\ndebug2: channel 1: gc: user detached\r\ndebug2: channel 1: is dead (local)\r\ndebug2: channel 1: garbage collecting\r\ndebug1: channel 1: free: mux-control, nchannels 2\r\ndebug3: channel 1: status: The following connections are open:\r\n #1 mux-control (t16 nr0 i3/0 o3/0 e[closed]/0 fd 5/5/-1 sock 5 cc -1)\r\n\r\ndebug2: set_control_persist_exit_time: schedule exit in 60 seconds\r\ndebug3: mux_client_read_packet: read header failed: Broken pipe\r\ndebug2: Received exit status from master 1\r\n')
<10.1.1.201> Failed to connect to the host via ssh: OpenSSH_8.2p1 Ubuntu-4ubuntu0.3, OpenSSL 1.1.1f 31 Mar 2020
debug1: Reading configuration data /etc/ssh/ssh_config
debug1: /etc/ssh/ssh_config line 19: include /etc/ssh/ssh_config.d/*.conf matched no files
debug1: /etc/ssh/ssh_config line 21: Applying options for *
debug2: resolve_canonicalize: hostname 10.1.1.201 is address
debug1: auto-mux: Trying existing master
debug1: Control socket "/root/.ansible/cp/48372ad7a3" does not exist
debug2: ssh_connect_direct
debug1: Connecting to 10.1.1.201 [10.1.1.201] port 22.
debug2: fd 3 setting O_NONBLOCK
debug1: fd 3 clearing O_NONBLOCK
debug1: Connection established.
debug3: timeout: 10000 ms remain after connect
debug1: SELinux support disabled
debug1: identity file /root/.ssh/id_rsa type -1
debug1: identity file /root/.ssh/id_rsa-cert type -1
debug1: identity file /root/.ssh/id_dsa type -1
debug1: identity file /root/.ssh/id_dsa-cert type -1
debug1: identity file /root/.ssh/id_ecdsa type -1
debug1: identity file /root/.ssh/id_ecdsa-cert type -1
debug1: identity file /root/.ssh/id_ecdsa_sk type -1
debug1: identity file /root/.ssh/id_ecdsa_sk-cert type -1
debug1: identity file /root/.ssh/id_ed25519 type -1
debug1: identity file /root/.ssh/id_ed25519-cert type -1
debug1: identity file /root/.ssh/id_ed25519_sk type -1
debug1: identity file /root/.ssh/id_ed25519_sk-cert type -1
debug1: identity file /root/.ssh/id_xmss type -1
debug1: identity file /root/.ssh/id_xmss-cert type -1
debug1: Local version string SSH-2.0-OpenSSH_8.2p1 Ubuntu-4ubuntu0.3
debug1: Remote protocol version 2.0, remote software version OpenSSH_8.2p1 Ubuntu-4ubuntu0.3
debug1: match: OpenSSH_8.2p1 Ubuntu-4ubuntu0.3 pat OpenSSH* compat 0x04000000
debug2: fd 3 setting O_NONBLOCK
debug1: Authenticating to 10.1.1.201:22 as 'manager'
debug3: send packet: type 20
debug1: SSH2_MSG_KEXINIT sent
debug3: receive packet: type 20
debug1: SSH2_MSG_KEXINIT received
debug2: local client KEXINIT proposal
debug2: KEX algorithms: curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group14-sha256,ext-info-c
debug2: host key algorithms: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,[email protected],ssh-ed25519,[email protected],rsa-sha2-512,rsa-sha2-256,ssh-rsa
debug2: ciphers ctos: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]
debug2: ciphers stoc: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]
debug2: MACs ctos: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1
debug2: MACs stoc: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1
debug2: compression ctos: [email protected],zlib,none
debug2: compression stoc: [email protected],zlib,none
debug2: languages ctos:
debug2: languages stoc:
debug2: first_kex_follows 0
debug2: reserved 0
debug2: peer server KEXINIT proposal
debug2: KEX algorithms: curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group14-sha256
debug2: host key algorithms: rsa-sha2-512,rsa-sha2-256,ssh-rsa,ecdsa-sha2-nistp256,ssh-ed25519
debug2: ciphers ctos: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]
debug2: ciphers stoc: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]
debug2: MACs ctos: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1
debug2: MACs stoc: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1
debug2: compression ctos: none,[email protected]
debug2: compression stoc: none,[email protected]
debug2: languages ctos:
debug2: languages stoc:
debug2: first_kex_follows 0
debug2: reserved 0
debug1: kex: algorithm: curve25519-sha256
debug1: kex: host key algorithm: ecdsa-sha2-nistp256
debug1: kex: server->client cipher: [email protected] MAC: <implicit> compression: [email protected]
debug1: kex: client->server cipher: [email protected] MAC: <implicit> compression: [email protected]
debug3: send packet: type 30
debug1: expecting SSH2_MSG_KEX_ECDH_REPLY
debug3: receive packet: type 31
debug1: Server host key: ecdsa-sha2-nistp256 SHA256:LylIOfTqWm+geE35hisc6xKHr/ZHTHTs/UQn80pYXOo
Warning: Permanently added '10.1.1.201' (ECDSA) to the list of known hosts.
debug3: send packet: type 21
debug2: set_newkeys: mode 1
debug1: rekey out after 134217728 blocks
debug1: SSH2_MSG_NEWKEYS sent
debug1: expecting SSH2_MSG_NEWKEYS
debug3: receive packet: type 21
debug1: SSH2_MSG_NEWKEYS received
debug2: set_newkeys: mode 0
debug1: rekey in after 134217728 blocks
debug1: Will attempt key: /root/.ssh/id_rsa
debug1: Will attempt key: /root/.ssh/id_dsa
debug1: Will attempt key: /root/.ssh/id_ecdsa
debug1: Will attempt key: /root/.ssh/id_ecdsa_sk
debug1: Will attempt key: /root/.ssh/id_ed25519
debug1: Will attempt key: /root/.ssh/id_ed25519_sk
debug1: Will attempt key: /root/.ssh/id_xmss
debug2: pubkey_prepare: done
debug3: send packet: type 5
debug3: receive packet: type 7
debug1: SSH2_MSG_EXT_INFO received
debug1: kex_input_ext_info: server-sig-algs=<ssh-ed25519,[email protected],ssh-rsa,rsa-sha2-256,rsa-sha2-512,ssh-dss,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,[email protected]>
debug3: receive packet: type 6
debug2: service_accept: ssh-userauth
debug1: SSH2_MSG_SERVICE_ACCEPT received
debug3: send packet: type 50
debug3: receive packet: type 51
debug1: Authentications that can continue: publickey,password
debug3: start over, passed a different list publickey,password
debug3: preferred gssapi-with-mic,publickey,keyboard-interactive,password
debug3: authmethod_lookup publickey
debug3: remaining preferred: keyboard-interactive,password
debug3: authmethod_is_enabled publickey
debug1: Next authentication method: publickey
debug1: Trying private key: /root/.ssh/id_rsa
debug3: no such identity: /root/.ssh/id_rsa: No such file or directory
debug1: Trying private key: /root/.ssh/id_dsa
debug3: no such identity: /root/.ssh/id_dsa: No such file or directory
debug1: Trying private key: /root/.ssh/id_ecdsa
debug3: no such identity: /root/.ssh/id_ecdsa: No such file or directory
debug1: Trying private key: /root/.ssh/id_ecdsa_sk
debug3: no such identity: /root/.ssh/id_ecdsa_sk: No such file or directory
debug1: Trying private key: /root/.ssh/id_ed25519
debug3: no such identity: /root/.ssh/id_ed25519: No such file or directory
debug1: Trying private key: /root/.ssh/id_ed25519_sk
debug3: no such identity: /root/.ssh/id_ed25519_sk: No such file or directory
debug1: Trying private key: /root/.ssh/id_xmss
debug3: no such identity: /root/.ssh/id_xmss: No such file or directory
debug2: we did not send a packet, disable method
debug3: authmethod_lookup password
debug3: remaining preferred: ,password
debug3: authmethod_is_enabled password
debug1: Next authentication method: password
debug3: send packet: type 50
debug2: we sent a password packet, wait for reply
debug3: receive packet: type 52
debug1: Enabling compression at level 6.
debug1: Authentication succeeded (password).
Authenticated to 10.1.1.201 ([10.1.1.201]:22).
debug1: setting up multiplex master socket
debug3: muxserver_listen: temporary control path /root/.ansible/cp/48372ad7a3.XW29Aj350ciuha3l
debug2: fd 4 setting O_NONBLOCK
debug3: fd 4 is O_NONBLOCK
debug3: fd 4 is O_NONBLOCK
debug1: channel 0: new [/root/.ansible/cp/48372ad7a3]
debug3: muxserver_listen: mux listener channel 0 fd 4
debug2: fd 3 setting TCP_NODELAY
debug3: ssh_packet_set_tos: set IP_TOS 0x08
debug1: control_persist_detach: backgrounding master process
debug2: control_persist_detach: background process is 16
debug2: fd 4 setting O_NONBLOCK
debug1: forking to background
debug1: Entering interactive session.
debug1: pledge: id
debug2: set_control_persist_exit_time: schedule exit in 60 seconds
debug1: multiplexing control connection
debug2: fd 5 setting O_NONBLOCK
debug3: fd 5 is O_NONBLOCK
debug1: channel 1: new [mux-control]
debug3: channel_post_mux_listener: new mux channel 1 fd 5
debug3: mux_master_read_cb: channel 1: hello sent
debug2: set_control_persist_exit_time: cancel scheduled exit
debug3: mux_master_read_cb: channel 1 packet type 0x00000001 len 4
debug2: mux_master_process_hello: channel 1 slave version 4
debug2: mux_client_hello_exchange: master version 4
debug3: mux_client_forwards: request forwardings: 0 local, 0 remote
debug3: mux_client_request_session: entering
debug3: mux_client_request_alive: entering
debug3: mux_master_read_cb: channel 1 packet type 0x10000004 len 4
debug2: mux_master_process_alive_check: channel 1: alive check
debug3: mux_client_request_alive: done pid = 18
debug3: mux_client_request_session: session request sent
debug3: mux_master_read_cb: channel 1 packet type 0x10000002 len 256
debug3: mux_master_process_new_session: got fds stdin 6, stdout 7, stderr 8
debug2: fd 6 setting O_NONBLOCK
debug2: fd 7 setting O_NONBLOCK
debug2: fd 8 setting O_NONBLOCK
debug1: channel 2: new [client-session]
debug2: mux_master_process_new_session: channel_new: 2 linked to control channel 1
debug2: channel 2: send open
debug3: send packet: type 90
debug3: receive packet: type 80
debug1: client_input_global_request: rtype [email protected] want_reply 0
debug3: receive packet: type 91
debug2: channel_input_open_confirmation: channel 2: callback start
debug2: client_session2_setup: id 2
debug1: Sending environment.
debug1: Sending env LANG = C.UTF-8
debug2: channel 2: request env confirm 0
debug3: send packet: type 98
debug1: Sending command: /bin/sh -c 'sudo -H -S -p "[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:" -u root /bin/sh -c '"'"'echo BECOME-SUCCESS-muszsaqzcjtetkenkeiiqhqnnsdqlvmx ; python3'"'"' && sleep 0'
debug2: channel 2: request exec confirm 1
debug3: send packet: type 98
debug3: mux_session_confirm: sending success reply
debug2: channel_input_open_confirmation: channel 2: callback done
debug2: channel 2: open confirm rwindow 0 rmax 32768
debug2: channel 2: rcvd adjust 2097152
debug3: receive packet: type 99
debug2: channel_input_status_confirm: type 99 id 2
debug2: exec request accepted on channel 2
debug2: channel 2: read<=0 rfd 6 len 0
debug2: channel 2: read failed
debug2: channel 2: chan_shutdown_read (i0 o0 sock -1 wfd 6 efd 8 [write])
debug2: channel 2: input open -> drain
debug2: channel 2: ibuf empty
debug2: channel 2: send eof
debug3: send packet: type 96
debug2: channel 2: input drain -> closed
debug2: channel 2: rcvd adjust 65536
debug2: channel 2: rcvd ext data 66
[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8
debug2: channel 2: rcvd ext data 18
Sorry, try again.
debug2: channel 2: written 18 to efd 8
debug2: channel 2: rcvd ext data 66
[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8
debug2: channel 2: rcvd ext data 18
Sorry, try again.
debug2: channel 2: written 18 to efd 8
debug2: channel 2: rcvd ext data 66
[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8
debug2: channel 2: rcvd ext data 36
sudo: 3 incorrect password attempts
debug2: channel 2: written 36 to efd 8
debug3: receive packet: type 98
debug1: client_input_channel_req: channel 2 rtype [email protected] reply 0
debug2: channel 2: rcvd eow
debug3: receive packet: type 96
debug2: channel 2: rcvd eof
debug2: channel 2: output open -> drain
debug2: channel 2: obuf empty
debug2: channel 2: chan_shutdown_write (i3 o1 sock -1 wfd 7 efd 8 [write])
debug2: channel 2: output drain -> closed
debug2: channel 2: send close
debug3: send packet: type 97
debug3: channel 2: will not send data after close
debug3: receive packet: type 98
debug1: client_input_channel_req: channel 2 rtype exit-status reply 0
debug3: mux_exit_message: channel 2: exit message, exitval 1
debug3: receive packet: type 97
debug2: channel 2: rcvd close
debug3: channel 2: will not send data after close
debug2: channel 2: is dead
debug2: channel 2: gc: notify user
debug3: mux_master_session_cleanup_cb: entering for channel 2
debug2: channel 1: rcvd close
debug2: channel 1: output open -> drain
debug2: channel 1: chan_shutdown_read (i0 o1 sock 5 wfd 5 efd -1 [closed])
debug2: channel 1: input open -> closed
debug2: channel 2: gc: user detached
debug2: channel 2: is dead
debug2: channel 2: garbage collecting
debug1: channel 2: free: client-session, nchannels 3
debug3: channel 2: status: The following connections are open:
#1 mux-control (t16 nr0 i3/0 o1/16 e[closed]/0 fd 5/5/-1 sock 5 cc -1)
#2 client-session (t4 r0 i3/0 o3/0 e[write]/0 fd -1/-1/8 sock -1 cc -1)
debug2: channel 1: obuf empty
debug2: channel 1: chan_shutdown_write (i3 o1 sock 5 wfd 5 efd -1 [closed])
debug2: channel 1: output drain -> closed
debug2: channel 1: is dead (local)
debug2: channel 1: gc: notify user
debug3: mux_master_control_cleanup_cb: entering for channel 1
debug2: channel 1: gc: user detached
debug2: channel 1: is dead (local)
debug2: channel 1: garbage collecting
debug1: channel 1: free: mux-control, nchannels 2
debug3: channel 1: status: The following connections are open:
#1 mux-control (t16 nr0 i3/0 o3/0 e[closed]/0 fd 5/5/-1 sock 5 cc -1)
debug2: set_control_persist_exit_time: schedule exit in 60 seconds
debug3: mux_client_read_packet: read header failed: Broken pipe
debug2: Received exit status from master 1
fatal: [test1]: FAILED! => {
"ansible_facts": {},
"changed": false,
"failed_modules": {
"ansible.legacy.setup": {
"failed": true,
"module_stderr": "OpenSSH_8.2p1 Ubuntu-4ubuntu0.3, OpenSSL 1.1.1f 31 Mar 2020\r\ndebug1: Reading configuration data /etc/ssh/ssh_config\r\ndebug1: /etc/ssh/ssh_config line 19: include /etc/ssh/ssh_config.d/*.conf matched no files\r\ndebug1: /etc/ssh/ssh_config line 21: Applying options for *\r\ndebug2: resolve_canonicalize: hostname 10.1.1.201 is address\r\ndebug1: auto-mux: Trying existing master\r\ndebug1: Control socket \"/root/.ansible/cp/48372ad7a3\" does not exist\r\ndebug2: ssh_connect_direct\r\ndebug1: Connecting to 10.1.1.201 [10.1.1.201] port 22.\r\ndebug2: fd 3 setting O_NONBLOCK\r\ndebug1: fd 3 clearing O_NONBLOCK\r\ndebug1: Connection established.\r\ndebug3: timeout: 10000 ms remain after connect\r\ndebug1: SELinux support disabled\r\ndebug1: identity file /root/.ssh/id_rsa type -1\r\ndebug1: identity file /root/.ssh/id_rsa-cert type -1\r\ndebug1: identity file /root/.ssh/id_dsa type -1\r\ndebug1: identity file /root/.ssh/id_dsa-cert type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa-cert type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa_sk type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa_sk-cert type -1\r\ndebug1: identity file /root/.ssh/id_ed25519 type -1\r\ndebug1: identity file /root/.ssh/id_ed25519-cert type -1\r\ndebug1: identity file /root/.ssh/id_ed25519_sk type -1\r\ndebug1: identity file /root/.ssh/id_ed25519_sk-cert type -1\r\ndebug1: identity file /root/.ssh/id_xmss type -1\r\ndebug1: identity file /root/.ssh/id_xmss-cert type -1\r\ndebug1: Local version string SSH-2.0-OpenSSH_8.2p1 Ubuntu-4ubuntu0.3\r\ndebug1: Remote protocol version 2.0, remote software version OpenSSH_8.2p1 Ubuntu-4ubuntu0.3\r\ndebug1: match: OpenSSH_8.2p1 Ubuntu-4ubuntu0.3 pat OpenSSH* compat 0x04000000\r\ndebug2: fd 3 setting O_NONBLOCK\r\ndebug1: Authenticating to 10.1.1.201:22 as 'manager'\r\ndebug3: send packet: type 20\r\ndebug1: SSH2_MSG_KEXINIT sent\r\ndebug3: receive packet: type 20\r\ndebug1: SSH2_MSG_KEXINIT received\r\ndebug2: local client KEXINIT proposal\r\ndebug2: KEX algorithms: curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group14-sha256,ext-info-c\r\ndebug2: host key algorithms: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,[email protected],ssh-ed25519,[email protected],rsa-sha2-512,rsa-sha2-256,ssh-rsa\r\ndebug2: ciphers ctos: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: ciphers stoc: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: MACs ctos: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: MACs stoc: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: compression ctos: [email protected],zlib,none\r\ndebug2: compression stoc: [email protected],zlib,none\r\ndebug2: languages ctos: \r\ndebug2: languages stoc: \r\ndebug2: first_kex_follows 0 \r\ndebug2: reserved 0 \r\ndebug2: peer server KEXINIT proposal\r\ndebug2: KEX algorithms: 
curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group14-sha256\r\ndebug2: host key algorithms: rsa-sha2-512,rsa-sha2-256,ssh-rsa,ecdsa-sha2-nistp256,ssh-ed25519\r\ndebug2: ciphers ctos: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: ciphers stoc: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: MACs ctos: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: MACs stoc: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: compression ctos: none,[email protected]\r\ndebug2: compression stoc: none,[email protected]\r\ndebug2: languages ctos: \r\ndebug2: languages stoc: \r\ndebug2: first_kex_follows 0 \r\ndebug2: reserved 0 \r\ndebug1: kex: algorithm: curve25519-sha256\r\ndebug1: kex: host key algorithm: ecdsa-sha2-nistp256\r\ndebug1: kex: server->client cipher: [email protected] MAC: <implicit> compression: [email protected]\r\ndebug1: kex: client->server cipher: [email protected] MAC: <implicit> compression: [email protected]\r\ndebug3: send packet: type 30\r\ndebug1: expecting SSH2_MSG_KEX_ECDH_REPLY\r\ndebug3: receive packet: type 31\r\ndebug1: Server host key: ecdsa-sha2-nistp256 SHA256:LylIOfTqWm+geE35hisc6xKHr/ZHTHTs/UQn80pYXOo\r\nWarning: Permanently added '10.1.1.201' (ECDSA) to the list of known hosts.\r\ndebug3: send packet: type 21\r\ndebug2: set_newkeys: mode 1\r\ndebug1: rekey out after 134217728 blocks\r\ndebug1: SSH2_MSG_NEWKEYS sent\r\ndebug1: expecting SSH2_MSG_NEWKEYS\r\ndebug3: receive packet: type 21\r\ndebug1: SSH2_MSG_NEWKEYS received\r\ndebug2: set_newkeys: mode 0\r\ndebug1: rekey in after 134217728 blocks\r\ndebug1: Will attempt key: /root/.ssh/id_rsa \r\ndebug1: Will attempt key: /root/.ssh/id_dsa \r\ndebug1: Will attempt key: /root/.ssh/id_ecdsa \r\ndebug1: Will attempt key: /root/.ssh/id_ecdsa_sk \r\ndebug1: Will attempt key: /root/.ssh/id_ed25519 \r\ndebug1: Will attempt key: /root/.ssh/id_ed25519_sk \r\ndebug1: Will attempt key: /root/.ssh/id_xmss \r\ndebug2: pubkey_prepare: done\r\ndebug3: send packet: type 5\r\ndebug3: receive packet: type 7\r\ndebug1: SSH2_MSG_EXT_INFO received\r\ndebug1: kex_input_ext_info: server-sig-algs=<ssh-ed25519,[email protected],ssh-rsa,rsa-sha2-256,rsa-sha2-512,ssh-dss,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,[email protected]>\r\ndebug3: receive packet: type 6\r\ndebug2: service_accept: ssh-userauth\r\ndebug1: SSH2_MSG_SERVICE_ACCEPT received\r\ndebug3: send packet: type 50\r\ndebug3: receive packet: type 51\r\ndebug1: Authentications that can continue: publickey,password\r\ndebug3: start over, passed a different list publickey,password\r\ndebug3: preferred gssapi-with-mic,publickey,keyboard-interactive,password\r\ndebug3: authmethod_lookup publickey\r\ndebug3: remaining preferred: keyboard-interactive,password\r\ndebug3: authmethod_is_enabled publickey\r\ndebug1: Next authentication method: publickey\r\ndebug1: Trying private key: /root/.ssh/id_rsa\r\ndebug3: no such identity: /root/.ssh/id_rsa: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_dsa\r\ndebug3: no such identity: /root/.ssh/id_dsa: No such 
file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ecdsa\r\ndebug3: no such identity: /root/.ssh/id_ecdsa: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ecdsa_sk\r\ndebug3: no such identity: /root/.ssh/id_ecdsa_sk: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ed25519\r\ndebug3: no such identity: /root/.ssh/id_ed25519: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ed25519_sk\r\ndebug3: no such identity: /root/.ssh/id_ed25519_sk: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_xmss\r\ndebug3: no such identity: /root/.ssh/id_xmss: No such file or directory\r\ndebug2: we did not send a packet, disable method\r\ndebug3: authmethod_lookup password\r\ndebug3: remaining preferred: ,password\r\ndebug3: authmethod_is_enabled password\r\ndebug1: Next authentication method: password\r\ndebug3: send packet: type 50\r\ndebug2: we sent a password packet, wait for reply\r\ndebug3: receive packet: type 52\r\ndebug1: Enabling compression at level 6.\r\ndebug1: Authentication succeeded (password).\r\nAuthenticated to 10.1.1.201 ([10.1.1.201]:22).\r\ndebug1: setting up multiplex master socket\r\ndebug3: muxserver_listen: temporary control path /root/.ansible/cp/48372ad7a3.XW29Aj350ciuha3l\r\ndebug2: fd 4 setting O_NONBLOCK\r\ndebug3: fd 4 is O_NONBLOCK\r\ndebug3: fd 4 is O_NONBLOCK\r\ndebug1: channel 0: new [/root/.ansible/cp/48372ad7a3]\r\ndebug3: muxserver_listen: mux listener channel 0 fd 4\r\ndebug2: fd 3 setting TCP_NODELAY\r\ndebug3: ssh_packet_set_tos: set IP_TOS 0x08\r\ndebug1: control_persist_detach: backgrounding master process\r\ndebug2: control_persist_detach: background process is 16\r\ndebug2: fd 4 setting O_NONBLOCK\r\ndebug1: forking to background\r\ndebug1: Entering interactive session.\r\ndebug1: pledge: id\r\ndebug2: set_control_persist_exit_time: schedule exit in 60 seconds\r\ndebug1: multiplexing control connection\r\ndebug2: fd 5 setting O_NONBLOCK\r\ndebug3: fd 5 is O_NONBLOCK\r\ndebug1: channel 1: new [mux-control]\r\ndebug3: channel_post_mux_listener: new mux channel 1 fd 5\r\ndebug3: mux_master_read_cb: channel 1: hello sent\r\ndebug2: set_control_persist_exit_time: cancel scheduled exit\r\ndebug3: mux_master_read_cb: channel 1 packet type 0x00000001 len 4\r\ndebug2: mux_master_process_hello: channel 1 slave version 4\r\ndebug2: mux_client_hello_exchange: master version 4\r\ndebug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r\ndebug3: mux_client_request_session: entering\r\ndebug3: mux_client_request_alive: entering\r\ndebug3: mux_master_read_cb: channel 1 packet type 0x10000004 len 4\r\ndebug2: mux_master_process_alive_check: channel 1: alive check\r\ndebug3: mux_client_request_alive: done pid = 18\r\ndebug3: mux_client_request_session: session request sent\r\ndebug3: mux_master_read_cb: channel 1 packet type 0x10000002 len 256\r\ndebug3: mux_master_process_new_session: got fds stdin 6, stdout 7, stderr 8\r\ndebug2: fd 6 setting O_NONBLOCK\r\ndebug2: fd 7 setting O_NONBLOCK\r\ndebug2: fd 8 setting O_NONBLOCK\r\ndebug1: channel 2: new [client-session]\r\ndebug2: mux_master_process_new_session: channel_new: 2 linked to control channel 1\r\ndebug2: channel 2: send open\r\ndebug3: send packet: type 90\r\ndebug3: receive packet: type 80\r\ndebug1: client_input_global_request: rtype [email protected] want_reply 0\r\ndebug3: receive packet: type 91\r\ndebug2: channel_input_open_confirmation: channel 2: callback start\r\ndebug2: client_session2_setup: id 
2\r\ndebug1: Sending environment.\r\ndebug1: Sending env LANG = C.UTF-8\r\ndebug2: channel 2: request env confirm 0\r\ndebug3: send packet: type 98\r\ndebug1: Sending command: /bin/sh -c 'sudo -H -S -p \"[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:\" -u root /bin/sh -c '\"'\"'echo BECOME-SUCCESS-muszsaqzcjtetkenkeiiqhqnnsdqlvmx ; python3'\"'\"' && sleep 0'\r\ndebug2: channel 2: request exec confirm 1\r\ndebug3: send packet: type 98\r\ndebug3: mux_session_confirm: sending success reply\r\ndebug2: channel_input_open_confirmation: channel 2: callback done\r\ndebug2: channel 2: open confirm rwindow 0 rmax 32768\r\ndebug2: channel 2: rcvd adjust 2097152\r\ndebug3: receive packet: type 99\r\ndebug2: channel_input_status_confirm: type 99 id 2\r\ndebug2: exec request accepted on channel 2\r\ndebug2: channel 2: read<=0 rfd 6 len 0\r\ndebug2: channel 2: read failed\r\ndebug2: channel 2: chan_shutdown_read (i0 o0 sock -1 wfd 6 efd 8 [write])\r\ndebug2: channel 2: input open -> drain\r\ndebug2: channel 2: ibuf empty\r\ndebug2: channel 2: send eof\r\ndebug3: send packet: type 96\r\ndebug2: channel 2: input drain -> closed\r\ndebug2: channel 2: rcvd adjust 65536\r\ndebug2: channel 2: rcvd ext data 66\r\n[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8\r\ndebug2: channel 2: rcvd ext data 18\r\nSorry, try again.\ndebug2: channel 2: written 18 to efd 8\r\ndebug2: channel 2: rcvd ext data 66\r\n[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8\r\ndebug2: channel 2: rcvd ext data 18\r\nSorry, try again.\ndebug2: channel 2: written 18 to efd 8\r\ndebug2: channel 2: rcvd ext data 66\r\n[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8\r\ndebug2: channel 2: rcvd ext data 36\r\nsudo: 3 incorrect password attempts\ndebug2: channel 2: written 36 to efd 8\r\ndebug3: receive packet: type 98\r\ndebug1: client_input_channel_req: channel 2 rtype [email protected] reply 0\r\ndebug2: channel 2: rcvd eow\r\ndebug3: receive packet: type 96\r\ndebug2: channel 2: rcvd eof\r\ndebug2: channel 2: output open -> drain\r\ndebug2: channel 2: obuf empty\r\ndebug2: channel 2: chan_shutdown_write (i3 o1 sock -1 wfd 7 efd 8 [write])\r\ndebug2: channel 2: output drain -> closed\r\ndebug2: channel 2: send close\r\ndebug3: send packet: type 97\r\ndebug3: channel 2: will not send data after close\r\ndebug3: receive packet: type 98\r\ndebug1: client_input_channel_req: channel 2 rtype exit-status reply 0\r\ndebug3: mux_exit_message: channel 2: exit message, exitval 1\r\ndebug3: receive packet: type 97\r\ndebug2: channel 2: rcvd close\r\ndebug3: channel 2: will not send data after close\r\ndebug2: channel 2: is dead\r\ndebug2: channel 2: gc: notify user\r\ndebug3: mux_master_session_cleanup_cb: entering for channel 2\r\ndebug2: channel 1: rcvd close\r\ndebug2: channel 1: output open -> drain\r\ndebug2: channel 1: chan_shutdown_read (i0 o1 sock 5 wfd 5 efd -1 [closed])\r\ndebug2: channel 1: input open -> closed\r\ndebug2: channel 2: gc: user detached\r\ndebug2: channel 2: is dead\r\ndebug2: channel 2: garbage collecting\r\ndebug1: channel 2: free: client-session, nchannels 3\r\ndebug3: channel 2: status: The following connections are open:\r\n #1 mux-control (t16 nr0 i3/0 o1/16 e[closed]/0 fd 5/5/-1 sock 5 cc -1)\r\n #2 client-session (t4 r0 i3/0 o3/0 e[write]/0 fd -1/-1/8 sock -1 cc -1)\r\n\r\ndebug2: channel 1: obuf empty\r\ndebug2: channel 1: 
chan_shutdown_write (i3 o1 sock 5 wfd 5 efd -1 [closed])\r\ndebug2: channel 1: output drain -> closed\r\ndebug2: channel 1: is dead (local)\r\ndebug2: channel 1: gc: notify user\r\ndebug3: mux_master_control_cleanup_cb: entering for channel 1\r\ndebug2: channel 1: gc: user detached\r\ndebug2: channel 1: is dead (local)\r\ndebug2: channel 1: garbage collecting\r\ndebug1: channel 1: free: mux-control, nchannels 2\r\ndebug3: channel 1: status: The following connections are open:\r\n #1 mux-control (t16 nr0 i3/0 o3/0 e[closed]/0 fd 5/5/-1 sock 5 cc -1)\r\n\r\ndebug2: set_control_persist_exit_time: schedule exit in 60 seconds\r\ndebug3: mux_client_read_packet: read header failed: Broken pipe\r\ndebug2: Received exit status from master 1\r\n",
"module_stdout": "",
"msg": "MODULE FAILURE\nSee stdout/stderr for the exact error",
"rc": 1
}
},
"msg": "The following modules failed to execute: ansible.legacy.setup\n"
}
PLAY RECAP *********************************************************************
test1 : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
Thursday 30 September 2021 14:38:33 +0000 (0:00:17.037) 0:00:17.098 ****
===============================================================================
Gathering Facts -------------------------------------------------------- 17.04s
/workspace/test.yml:2 ---------------------------------------------------------
```
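Comparing the `SSH: EXEC` lines from the failing `-vvvv` run above and the passing `-vvv` run below shows the key difference: at `-vvvv`, Ansible passes `-vvv` to `ssh` itself, so OpenSSH debug output is interleaved with the sudo become prompt. Abbreviated from the two logs (options elided with `...` for brevity):

```console
# -vvvv run (fails): ssh itself is invoked with -vvv, mixing debug output into the streams
sshpass -d10 ssh -vvv -C -o ControlMaster=auto ... 10.1.1.201 '/bin/sh -c ...'
# -vvv run (passes): ssh is invoked without its own debug flags
sshpass -d10 ssh -C -o ControlMaster=auto ... 10.1.1.201 '/bin/sh -c ...'
```

That would explain the repeated `Sorry, try again.` and `sudo: 3 incorrect password attempts` messages even though the same password works at `-vvv`: the become prompt detection appears to mis-fire amid the ssh debug chatter. This reading is an inference from the logs, not a confirmed root cause.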
Output with `-vvv`
```console
ansible-playbook [core 2.11.5]
config file = /workspace/ansible.cfg
configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible
ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible-playbook
python version = 3.8.10 (default, Jun 2 2021, 10:49:15) [GCC 9.4.0]
jinja version = 3.0.1
libyaml = True
Using /workspace/ansible.cfg as config file
host_list declined parsing /workspace/inventories/test/hosts.yml as it did not pass its verify_file() method
script declined parsing /workspace/inventories/test/hosts.yml as it did not pass its verify_file() method
Parsed /workspace/inventories/test/hosts.yml inventory source with yaml plugin
Skipping callback 'default', as we already have a stdout callback.
Skipping callback 'minimal', as we already have a stdout callback.
Skipping callback 'oneline', as we already have a stdout callback.
PLAYBOOK: test.yml *************************************************************
1 plays in test.yml
PLAY [gitlab_runners] **********************************************************
TASK [Gathering Facts] *********************************************************
task path: /workspace/test.yml:2
Thursday 30 September 2021 14:39:02 +0000 (0:00:00.044) 0:00:00.044 ****
Using module file /usr/local/lib/python3.8/dist-packages/ansible/modules/setup.py
Pipelining is enabled.
<10.1.1.201> ESTABLISH SSH CONNECTION FOR USER: manager
<10.1.1.201> SSH: EXEC sshpass -d10 ssh -C -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no -o 'User="manager"' -o ConnectTimeout=10 -o StrictHostKeyChecking=no -o ControlPath=/root/.ansible/cp/48372ad7a3 10.1.1.201 '/bin/sh -c '"'"'sudo -H -S -p "[sudo via ansible, key=emwqvsoxlkrjzgibgvumxuqwnsbstafh] password:" -u root /bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-emwqvsoxlkrjzgibgvumxuqwnsbstafh ; python3'"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
<10.1.1.201> (0, b'\n{"ansible_facts": {"ansible_system": "Linux", "ansible_kernel": "5.4.0-86-generic", "ansible_kernel_version": "#97-Ubuntu SMP Fri Sep 17 19:19:40 UTC 2021", "ansible_machine": "x86_64", "ansible_python_version": "3.8.10", "ansible_fqdn": "test1", "ansible_hostname": "test1", "ansible_nodename": "test1", "ansible_domain": "", "ansible_userspace_bits": "64", "ansible_architecture": "x86_64", "ansible_userspace_architecture": "x86_64", "ansible_machine_id": "c47dd6e16b184c00a0226434c8d1b693", "ansible_user_id": "root", "ansible_user_uid": 0, "ansible_user_gid": 0, "ansible_user_gecos": "root", "ansible_user_dir": "/root", "ansible_user_shell": "/bin/bash", "ansible_real_user_id": 0, "ansible_effective_user_id": 0, "ansible_real_group_id": 0, "ansible_effective_group_id": 0, "ansible_python": {"version": {"major": 3, "minor": 8, "micro": 10, "releaselevel": "final", "serial": 0}, "version_info": [3, 8, 10, "final", 0], "executable": "/usr/bin/python3", "has_sslcontext": true, "type": "cpython"}, "ansible_fibre_channel_wwn": [], "ansible_is_chroot": false, "ansible_dns": {"nameservers": ["127.0.0.53"], "options": {"edns0": true, "trust-ad": true}, "search": ["frauscher.intern"]}, "ansible_processor": ["0", "GenuineIntel", "Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.60GHz", "1", "GenuineIntel", "Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.60GHz"], "ansible_processor_count": 1, "ansible_processor_cores": 2, "ansible_processor_threads_per_core": 1, "ansible_processor_vcpus": 2, "ansible_processor_nproc": 2, "ansible_memtotal_mb": 1987, "ansible_memfree_mb": 1459, "ansible_swaptotal_mb": 0, "ansible_swapfree_mb": 0, "ansible_memory_mb": {"real": {"total": 1987, "used": 528, "free": 1459}, "nocache": {"free": 1790, "used": 197}, "swap": {"total": 0, "free": 0, "used": 0, "cached": 0}}, "ansible_bios_date": "12/01/2006", "ansible_bios_vendor": "innotek GmbH", "ansible_bios_version": "VirtualBox", "ansible_board_asset_tag": "NA", "ansible_board_name": "VirtualBox", "ansible_board_serial": "0", "ansible_board_vendor": "Oracle Corporation", "ansible_board_version": "1.2", "ansible_chassis_asset_tag": "NA", "ansible_chassis_serial": "NA", "ansible_chassis_vendor": "Oracle Corporation", "ansible_chassis_version": "NA", "ansible_form_factor": "Other", "ansible_product_name": "VirtualBox", "ansible_product_serial": "0", "ansible_product_uuid": "e0eea82f-53ae-3447-bc29-87fb43fa6de8", "ansible_product_version": "1.2", "ansible_system_vendor": "innotek GmbH", "ansible_devices": {"loop1": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "4096", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "137728", "sectorsize": "512", "size": "67.25 MB", "host": "", "holders": []}, "sdb": {"virtual": 1, "links": {"ids": [], "uuids": ["2021-09-23-22-20-32-00"], "labels": ["cidata"], "masters": []}, "vendor": "VBOX", "model": "HARDDISK", "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "0", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "20480", "sectorsize": "512", "size": "10.00 MB", "host": "SCSI storage controller: Broadcom / LSI 53c1030 PCI-X Fusion-MPT Dual Ultra320 SCSI", "holders": []}, "loop6": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", 
"support_discard": "0", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "0", "sectorsize": "512", "size": "0.00 Bytes", "host": "", "holders": []}, "loop4": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "0", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "0", "sectorsize": "512", "size": "0.00 Bytes", "host": "", "holders": []}, "loop2": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "4096", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "66168", "sectorsize": "512", "size": "32.31 MB", "host": "", "holders": []}, "loop0": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "4096", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "126504", "sectorsize": "512", "size": "61.77 MB", "host": "", "holders": []}, "loop7": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "0", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "0", "sectorsize": "512", "size": "0.00 Bytes", "host": "", "holders": []}, "sda": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": "VBOX", "model": "HARDDISK", "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "0", "partitions": {"sda1": {"links": {"ids": [], "uuids": ["94aefc52-4039-4b4a-8b2a-65124174d9b9"], "labels": ["cloudimg-rootfs"], "masters": []}, "start": "2048", "sectors": "83883999", "sectorsize": 512, "size": "40.00 GB", "uuid": "94aefc52-4039-4b4a-8b2a-65124174d9b9", "holders": []}}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "83886080", "sectorsize": "512", "size": "40.00 GB", "host": "SCSI storage controller: Broadcom / LSI 53c1030 PCI-X Fusion-MPT Dual Ultra320 SCSI", "holders": []}, "loop5": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "0", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "0", "sectorsize": "512", "size": "0.00 Bytes", "host": "", "holders": []}, "loop3": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "4096", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "0", "sectorsize": "512", "size": "0.00 Bytes", "host": "", "holders": []}}, "ansible_device_links": {"ids": {}, "uuids": {"sda1": ["94aefc52-4039-4b4a-8b2a-65124174d9b9"], "sdb": ["2021-09-23-22-20-32-00"]}, "labels": {"sda1": ["cloudimg-rootfs"], "sdb": ["cidata"]}, "masters": {}}, "ansible_uptime_seconds": 319, "ansible_lvm": {"lvs": {}, "vgs": {}, "pvs": {}}, "ansible_mounts": [{"mount": "/", "device": "/dev/sda1", "fstype": "ext4", "options": "rw,relatime", "size_total": 41567858688, 
"size_available": 40172617728, "block_size": 4096, "block_total": 10148403, "block_available": 9807768, "block_used": 340635, "inode_total": 5120000, "inode_available": 5047941, "inode_used": 72059, "uuid": "94aefc52-4039-4b4a-8b2a-65124174d9b9"}, {"mount": "/snap/core20/1081", "device": "/dev/loop0", "fstype": "squashfs", "options": "ro,nodev,relatime", "size_total": 64880640, "size_available": 0, "block_size": 131072, "block_total": 495, "block_available": 0, "block_used": 495, "inode_total": 11720, "inode_available": 0, "inode_used": 11720, "uuid": "N/A"}, {"mount": "/snap/lxd/21545", "device": "/dev/loop1", "fstype": "squashfs", "options": "ro,nodev,relatime", "size_total": 70516736, "size_available": 0, "block_size": 131072, "block_total": 538, "block_available": 0, "block_used": 538, "inode_total": 796, "inode_available": 0, "inode_used": 796, "uuid": "N/A"}, {"mount": "/snap/snapd/13170", "device": "/dev/loop2", "fstype": "squashfs", "options": "ro,nodev,relatime", "size_total": 33947648, "size_available": 0, "block_size": 131072, "block_total": 259, "block_available": 0, "block_used": 259, "inode_total": 474, "inode_available": 0, "inode_used": 474, "uuid": "N/A"}], "ansible_system_capabilities_enforced": "False", "ansible_system_capabilities": [], "ansible_ssh_host_key_dsa_public": "AAAAB3NzaC1kc3MAAACBANhTZfF+TbSqS3c3V0mYbiGOOHdB+wzCQlw73P/orjE3GaQQ3u3x4FA5RZcrfaBXKxgCatn4oTHbYN1vPhRECVvasTgZtyNP418lXaEsd4fJ7SLc0ju4BKaq8Pr5ppNSbcOEKru96ENnbHHrDi/XC7fKSpHijjGDhK86+HYs724/AAAAFQDGLonRqAKyuVczMoJLceaRShaCAwAAAIArdhTSkCW1LKS6II0NSonSQWeFb6f0X2kGVmfyvy9wFDgwdx5BkMEdaJBZrdIrROHQF+IPcK3jNfEAeJjfJ07YSFPiTQGzV6Y3clHchT/y6z0t0yHn+eP3w3OEjAPwRB4NAOdGsQdpg7Qs/sgSLjJ47EbLpf6P+ImfH7L8NMNB+wAAAIBIIpiFAEOa670brc4+aIJhObs3vuCKkmr9lOHBBqtMM/a8CQprG8PrbacZ3TRUBzBMTmWyFVSsniEpAhy1V7BHt7sgUBWu27A4fb1O94Vb1r7cXZ8BSKnT8TRmzdnxMdYPAiYhgQYF3uRfPB97AjBlDOaZGIug7quxtoM8+lg6YQ==", "ansible_ssh_host_key_dsa_public_keytype": "ssh-dss", "ansible_ssh_host_key_rsa_public": "AAAAB3NzaC1yc2EAAAADAQABAAABgQDYA8nuLROQD6tE5QAyK/7j+ewLYa+xOLFcDelQ2ack4gh8+2cM11oEvuz6QcL4xVKIOz308M8WK9x6dKGF2ujCaSEp2/DWXLbpCX/Rn6pB6OOeolq6Cop/OTE8M9tutCzyG1mr9UZZTkPks/j3bsIPPHjHNf/554BH4beG7zTlzARp4MABqkmTQ1XGGzZLxzRGKUUJaLo+6mxxf+3mcKKpzHTHiaGf20s57WXIbokJ1PJ4iwh/zdT93ebG7VwxxPcxDs+UwHcTJMnhXHpOwlXaBxUyawzOWGceWnpmlXs/aycqZ/5LbZf1BQp13WnDiLfuD1tzXCHq0Dl05cnMuOJhwSSb+13UZr1uxk7y8EUiYKKbOoamj7+8wvfi++3mGwHW1kokm1wWsu79EBxaVu7r0nTxs57JuCHFIUvDt63ajI0G4+U9Qhu7Kxi9NmNyoyv1JSPVXyv2dSZFQZwh6phzyyztirQSR4wbTzlEbmhKHESMUkhE0mgdMWajxzLUIXM=", "ansible_ssh_host_key_rsa_public_keytype": "ssh-rsa", "ansible_ssh_host_key_ecdsa_public": "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOrElzJrdktqNP+A5xDLWb3KLPYOjn9q/0v0+/15rBMNgTvkQXyVhbV/b8OxEpjo7evCk4XNM6Zg17N5AxdDX7k=", "ansible_ssh_host_key_ecdsa_public_keytype": "ecdsa-sha2-nistp256", "ansible_ssh_host_key_ed25519_public": "AAAAC3NzaC1lZDI1NTE5AAAAICHStriNsa0rVUQpwU7t4cmg+DNXSPVS+csFo7nbnlHx", "ansible_ssh_host_key_ed25519_public_keytype": "ssh-ed25519", "ansible_iscsi_iqn": "", "ansible_fips": false, "ansible_distribution": "Ubuntu", "ansible_distribution_release": "focal", "ansible_distribution_version": "20.04", "ansible_distribution_major_version": "20", "ansible_distribution_file_path": "/etc/os-release", "ansible_distribution_file_variety": "Debian", "ansible_distribution_file_parsed": true, "ansible_os_family": "Debian", "ansible_virtualization_role": "guest", "ansible_virtualization_type": "virtualbox", "ansible_virtualization_tech_guest": ["virtualbox"], 
"ansible_virtualization_tech_host": [], "ansible_lsb": {"id": "Ubuntu", "description": "Ubuntu 20.04.3 LTS", "release": "20.04", "codename": "focal", "major_release": "20"}, "ansible_cmdline": {"BOOT_IMAGE": "/boot/vmlinuz-5.4.0-86-generic", "root": "UUID=94aefc52-4039-4b4a-8b2a-65124174d9b9", "ro": true, "console": "ttyS0"}, "ansible_proc_cmdline": {"BOOT_IMAGE": "/boot/vmlinuz-5.4.0-86-generic", "root": "UUID=94aefc52-4039-4b4a-8b2a-65124174d9b9", "ro": true, "console": ["tty1", "ttyS0"]}, "ansible_hostnqn": "", "ansible_local": {}, "ansible_env": {"SUDO_GID": "1900", "MAIL": "/var/mail/root", "USER": "root", "HOME": "/root", "SUDO_UID": "1900", "LOGNAME": "root", "TERM": "unknown", "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin", "LANG": "C.UTF-8", "SUDO_COMMAND": "/bin/sh -c echo BECOME-SUCCESS-emwqvsoxlkrjzgibgvumxuqwnsbstafh ; python3", "SHELL": "/bin/bash", "SUDO_USER": "manager", "PWD": "/home/manager"}, "ansible_interfaces": ["enp0s8", "enp0s3", "lo"], "ansible_enp0s8": {"device": "enp0s8", "macaddress": "08:00:27:67:8d:69", "mtu": 1500, "active": true, "module": "e1000", "type": "ether", "pciid": "0000:00:08.0", "speed": 1000, "promisc": false, "ipv4": {"address": "10.1.1.201", "broadcast": "10.1.1.255", "netmask": "255.255.255.0", "network": "10.1.1.0"}, "ipv6": [{"address": "fe80::a00:27ff:fe67:8d69", "prefix": "64", "scope": "link"}], "features": {"rx_checksumming": "off", "tx_checksumming": "on", "tx_checksum_ipv4": "off [fixed]", "tx_checksum_ip_generic": "on", "tx_checksum_ipv6": "off [fixed]", "tx_checksum_fcoe_crc": "off [fixed]", "tx_checksum_sctp": "off [fixed]", "scatter_gather": "on", "tx_scatter_gather": "on", "tx_scatter_gather_fraglist": "off [fixed]", "tcp_segmentation_offload": "on", "tx_tcp_segmentation": "on", "tx_tcp_ecn_segmentation": "off [fixed]", "tx_tcp_mangleid_segmentation": "off", "tx_tcp6_segmentation": "off [fixed]", "generic_segmentation_offload": "on", "generic_receive_offload": "on", "large_receive_offload": "off [fixed]", "rx_vlan_offload": "on", "tx_vlan_offload": "on [fixed]", "ntuple_filters": "off [fixed]", "receive_hashing": "off [fixed]", "highdma": "off [fixed]", "rx_vlan_filter": "on [fixed]", "vlan_challenged": "off [fixed]", "tx_lockless": "off [fixed]", "netns_local": "off [fixed]", "tx_gso_robust": "off [fixed]", "tx_fcoe_segmentation": "off [fixed]", "tx_gre_segmentation": "off [fixed]", "tx_gre_csum_segmentation": "off [fixed]", "tx_ipxip4_segmentation": "off [fixed]", "tx_ipxip6_segmentation": "off [fixed]", "tx_udp_tnl_segmentation": "off [fixed]", "tx_udp_tnl_csum_segmentation": "off [fixed]", "tx_gso_partial": "off [fixed]", "tx_sctp_segmentation": "off [fixed]", "tx_esp_segmentation": "off [fixed]", "tx_udp_segmentation": "off [fixed]", "fcoe_mtu": "off [fixed]", "tx_nocache_copy": "off", "loopback": "off [fixed]", "rx_fcs": "off", "rx_all": "off", "tx_vlan_stag_hw_insert": "off [fixed]", "rx_vlan_stag_hw_parse": "off [fixed]", "rx_vlan_stag_filter": "off [fixed]", "l2_fwd_offload": "off [fixed]", "hw_tc_offload": "off [fixed]", "esp_hw_offload": "off [fixed]", "esp_tx_csum_hw_offload": "off [fixed]", "rx_udp_tunnel_port_offload": "off [fixed]", "tls_hw_tx_offload": "off [fixed]", "tls_hw_rx_offload": "off [fixed]", "rx_gro_hw": "off [fixed]", "tls_hw_record": "off [fixed]"}, "timestamping": ["tx_software", "rx_software", "software"], "hw_timestamp_filters": []}, "ansible_enp0s3": {"device": "enp0s3", "macaddress": "02:de:69:7f:c7:01", "mtu": 1500, "active": true, "module": "e1000", "type": 
"ether", "pciid": "0000:00:03.0", "speed": 1000, "promisc": false, "ipv4": {"address": "10.0.2.15", "broadcast": "10.0.2.255", "netmask": "255.255.255.0", "network": "10.0.2.0"}, "ipv6": [{"address": "fe80::de:69ff:fe7f:c701", "prefix": "64", "scope": "link"}], "features": {"rx_checksumming": "off", "tx_checksumming": "on", "tx_checksum_ipv4": "off [fixed]", "tx_checksum_ip_generic": "on", "tx_checksum_ipv6": "off [fixed]", "tx_checksum_fcoe_crc": "off [fixed]", "tx_checksum_sctp": "off [fixed]", "scatter_gather": "on", "tx_scatter_gather": "on", "tx_scatter_gather_fraglist": "off [fixed]", "tcp_segmentation_offload": "on", "tx_tcp_segmentation": "on", "tx_tcp_ecn_segmentation": "off [fixed]", "tx_tcp_mangleid_segmentation": "off", "tx_tcp6_segmentation": "off [fixed]", "generic_segmentation_offload": "on", "generic_receive_offload": "on", "large_receive_offload": "off [fixed]", "rx_vlan_offload": "on", "tx_vlan_offload": "on [fixed]", "ntuple_filters": "off [fixed]", "receive_hashing": "off [fixed]", "highdma": "off [fixed]", "rx_vlan_filter": "on [fixed]", "vlan_challenged": "off [fixed]", "tx_lockless": "off [fixed]", "netns_local": "off [fixed]", "tx_gso_robust": "off [fixed]", "tx_fcoe_segmentation": "off [fixed]", "tx_gre_segmentation": "off [fixed]", "tx_gre_csum_segmentation": "off [fixed]", "tx_ipxip4_segmentation": "off [fixed]", "tx_ipxip6_segmentation": "off [fixed]", "tx_udp_tnl_segmentation": "off [fixed]", "tx_udp_tnl_csum_segmentation": "off [fixed]", "tx_gso_partial": "off [fixed]", "tx_sctp_segmentation": "off [fixed]", "tx_esp_segmentation": "off [fixed]", "tx_udp_segmentation": "off [fixed]", "fcoe_mtu": "off [fixed]", "tx_nocache_copy": "off", "loopback": "off [fixed]", "rx_fcs": "off", "rx_all": "off", "tx_vlan_stag_hw_insert": "off [fixed]", "rx_vlan_stag_hw_parse": "off [fixed]", "rx_vlan_stag_filter": "off [fixed]", "l2_fwd_offload": "off [fixed]", "hw_tc_offload": "off [fixed]", "esp_hw_offload": "off [fixed]", "esp_tx_csum_hw_offload": "off [fixed]", "rx_udp_tunnel_port_offload": "off [fixed]", "tls_hw_tx_offload": "off [fixed]", "tls_hw_rx_offload": "off [fixed]", "rx_gro_hw": "off [fixed]", "tls_hw_record": "off [fixed]"}, "timestamping": ["tx_software", "rx_software", "software"], "hw_timestamp_filters": []}, "ansible_lo": {"device": "lo", "mtu": 65536, "active": true, "type": "loopback", "promisc": false, "ipv4": {"address": "127.0.0.1", "broadcast": "", "netmask": "255.0.0.0", "network": "127.0.0.0"}, "ipv6": [{"address": "::1", "prefix": "128", "scope": "host"}], "features": {"rx_checksumming": "on [fixed]", "tx_checksumming": "on", "tx_checksum_ipv4": "off [fixed]", "tx_checksum_ip_generic": "on [fixed]", "tx_checksum_ipv6": "off [fixed]", "tx_checksum_fcoe_crc": "off [fixed]", "tx_checksum_sctp": "on [fixed]", "scatter_gather": "on", "tx_scatter_gather": "on [fixed]", "tx_scatter_gather_fraglist": "on [fixed]", "tcp_segmentation_offload": "on", "tx_tcp_segmentation": "on", "tx_tcp_ecn_segmentation": "on", "tx_tcp_mangleid_segmentation": "on", "tx_tcp6_segmentation": "on", "generic_segmentation_offload": "on", "generic_receive_offload": "on", "large_receive_offload": "off [fixed]", "rx_vlan_offload": "off [fixed]", "tx_vlan_offload": "off [fixed]", "ntuple_filters": "off [fixed]", "receive_hashing": "off [fixed]", "highdma": "on [fixed]", "rx_vlan_filter": "off [fixed]", "vlan_challenged": "on [fixed]", "tx_lockless": "on [fixed]", "netns_local": "on [fixed]", "tx_gso_robust": "off [fixed]", "tx_fcoe_segmentation": "off [fixed]", "tx_gre_segmentation": 
"off [fixed]", "tx_gre_csum_segmentation": "off [fixed]", "tx_ipxip4_segmentation": "off [fixed]", "tx_ipxip6_segmentation": "off [fixed]", "tx_udp_tnl_segmentation": "off [fixed]", "tx_udp_tnl_csum_segmentation": "off [fixed]", "tx_gso_partial": "off [fixed]", "tx_sctp_segmentation": "on", "tx_esp_segmentation": "off [fixed]", "tx_udp_segmentation": "off [fixed]", "fcoe_mtu": "off [fixed]", "tx_nocache_copy": "off [fixed]", "loopback": "on [fixed]", "rx_fcs": "off [fixed]", "rx_all": "off [fixed]", "tx_vlan_stag_hw_insert": "off [fixed]", "rx_vlan_stag_hw_parse": "off [fixed]", "rx_vlan_stag_filter": "off [fixed]", "l2_fwd_offload": "off [fixed]", "hw_tc_offload": "off [fixed]", "esp_hw_offload": "off [fixed]", "esp_tx_csum_hw_offload": "off [fixed]", "rx_udp_tunnel_port_offload": "off [fixed]", "tls_hw_tx_offload": "off [fixed]", "tls_hw_rx_offload": "off [fixed]", "rx_gro_hw": "off [fixed]", "tls_hw_record": "off [fixed]"}, "timestamping": ["tx_software", "rx_software", "software"], "hw_timestamp_filters": []}, "ansible_default_ipv4": {"gateway": "10.0.2.2", "interface": "enp0s3", "address": "10.0.2.15", "broadcast": "10.0.2.255", "netmask": "255.255.255.0", "network": "10.0.2.0", "macaddress": "02:de:69:7f:c7:01", "mtu": 1500, "type": "ether", "alias": "enp0s3"}, "ansible_default_ipv6": {}, "ansible_all_ipv4_addresses": ["10.1.1.201", "10.0.2.15"], "ansible_all_ipv6_addresses": ["fe80::a00:27ff:fe67:8d69", "fe80::de:69ff:fe7f:c701"], "ansible_selinux_python_present": true, "ansible_selinux": {"status": "disabled"}, "ansible_apparmor": {"status": "enabled"}, "ansible_date_time": {"year": "2021", "month": "09", "weekday": "Thursday", "weekday_number": "4", "weeknumber": "39", "day": "30", "hour": "14", "minute": "39", "second": "09", "epoch": "1633012749", "date": "2021-09-30", "time": "14:39:09", "iso8601_micro": "2021-09-30T14:39:09.397030Z", "iso8601": "2021-09-30T14:39:09Z", "iso8601_basic": "20210930T143909397030", "iso8601_basic_short": "20210930T143909", "tz": "UTC", "tz_dst": "UTC", "tz_offset": "+0000"}, "ansible_service_mgr": "systemd", "ansible_pkg_mgr": "apt", "gather_subset": ["all"], "module_setup": true}, "invocation": {"module_args": {"gather_subset": ["all"], "gather_timeout": 10, "filter": [], "fact_path": "/etc/ansible/facts.d"}}}\n', b"Warning: Permanently added '10.1.1.201' (ECDSA) to the list of known hosts.\r\n")
ok: [test1]
META: ran handlers
TASK [debug] *******************************************************************
task path: /workspace/test.yml:5
Thursday 30 September 2021 14:39:19 +0000 (0:00:17.449) 0:00:17.494 ****
ok: [test1] => {
"msg": "Success"
}
META: ran handlers
META: ran handlers
PLAY RECAP *********************************************************************
test1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
Thursday 30 September 2021 14:39:19 +0000 (0:00:00.040) 0:00:17.534 ****
===============================================================================
Gathering Facts -------------------------------------------------------- 17.45s
/workspace/test.yml:2 ---------------------------------------------------------
debug ------------------------------------------------------------------- 0.04s
/workspace/test.yml:5 ---------------------------------------------------------
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75834
|
https://github.com/ansible/ansible/pull/76732
|
9142be2f6cabbe6597c9254c5bb9186d17036d55
|
0ff80a15ba40c2aff3b96c1152f19c97a92d3c97
| 2021-09-30T08:41:28Z |
python
| 2022-01-13T21:28:09Z |
lib/ansible/plugins/connection/ssh.py
|
# Copyright (c) 2012, Michael DeHaan <[email protected]>
# Copyright 2015 Abhijit Menon-Sen <[email protected]>
# Copyright 2017 Toshio Kuratomi <[email protected]>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: ssh
short_description: connect via SSH client binary
description:
- This connection plugin allows Ansible to communicate to the target machines through normal SSH command line.
        - Ansible does not expose a channel that lets the user interact with the SSH process, so a passphrase
          cannot be entered manually to decrypt an SSH key when using this connection plugin (which is the default).
          The use of C(ssh-agent) is highly recommended.
author: ansible (@core)
extends_documentation_fragment:
- connection_pipelining
version_added: historical
notes:
- Many options default to C(None) here but that only means we do not override the SSH tool's defaults and/or configuration.
For example, if you specify the port in this plugin it will override any C(Port) entry in your C(.ssh/config).
options:
host:
description: Hostname/IP to connect to.
vars:
- name: inventory_hostname
- name: ansible_host
- name: ansible_ssh_host
- name: delegated_vars['ansible_host']
- name: delegated_vars['ansible_ssh_host']
host_key_checking:
description: Determines if SSH should check host keys.
default: True
type: boolean
ini:
- section: defaults
key: 'host_key_checking'
- section: ssh_connection
key: 'host_key_checking'
version_added: '2.5'
env:
- name: ANSIBLE_HOST_KEY_CHECKING
- name: ANSIBLE_SSH_HOST_KEY_CHECKING
version_added: '2.5'
vars:
- name: ansible_host_key_checking
version_added: '2.5'
- name: ansible_ssh_host_key_checking
version_added: '2.5'
password:
description: Authentication password for the C(remote_user). Can be supplied as CLI option.
vars:
- name: ansible_password
- name: ansible_ssh_pass
- name: ansible_ssh_password
sshpass_prompt:
description:
- Password prompt that sshpass should search for. Supported by sshpass 1.06 and up.
- Defaults to C(Enter PIN for) when pkcs11_provider is set.
default: ''
ini:
- section: 'ssh_connection'
key: 'sshpass_prompt'
env:
- name: ANSIBLE_SSHPASS_PROMPT
vars:
- name: ansible_sshpass_prompt
version_added: '2.10'
ssh_args:
description: Arguments to pass to all SSH CLI tools.
default: '-C -o ControlMaster=auto -o ControlPersist=60s'
ini:
- section: 'ssh_connection'
key: 'ssh_args'
env:
- name: ANSIBLE_SSH_ARGS
vars:
- name: ansible_ssh_args
version_added: '2.7'
ssh_common_args:
description: Common extra args for all SSH CLI tools.
ini:
- section: 'ssh_connection'
key: 'ssh_common_args'
version_added: '2.7'
env:
- name: ANSIBLE_SSH_COMMON_ARGS
version_added: '2.7'
vars:
- name: ansible_ssh_common_args
cli:
- name: ssh_common_args
default: ''
ssh_executable:
default: ssh
description:
- This defines the location of the SSH binary. It defaults to C(ssh) which will use the first SSH binary available in $PATH.
        - This option is usually not required; it might be useful when access to system SSH is restricted,
          or when using SSH wrappers to connect to remote hosts.
env: [{name: ANSIBLE_SSH_EXECUTABLE}]
ini:
- {key: ssh_executable, section: ssh_connection}
#const: ANSIBLE_SSH_EXECUTABLE
version_added: "2.2"
vars:
- name: ansible_ssh_executable
version_added: '2.7'
sftp_executable:
default: sftp
description:
- This defines the location of the sftp binary. It defaults to C(sftp) which will use the first binary available in $PATH.
env: [{name: ANSIBLE_SFTP_EXECUTABLE}]
ini:
- {key: sftp_executable, section: ssh_connection}
version_added: "2.6"
vars:
- name: ansible_sftp_executable
version_added: '2.7'
scp_executable:
default: scp
description:
- This defines the location of the scp binary. It defaults to C(scp) which will use the first binary available in $PATH.
env: [{name: ANSIBLE_SCP_EXECUTABLE}]
ini:
- {key: scp_executable, section: ssh_connection}
version_added: "2.6"
vars:
- name: ansible_scp_executable
version_added: '2.7'
scp_extra_args:
      description: Extra arguments exclusive to the C(scp) CLI.
vars:
- name: ansible_scp_extra_args
env:
- name: ANSIBLE_SCP_EXTRA_ARGS
version_added: '2.7'
ini:
- key: scp_extra_args
section: ssh_connection
version_added: '2.7'
cli:
- name: scp_extra_args
default: ''
sftp_extra_args:
      description: Extra arguments exclusive to the C(sftp) CLI.
vars:
- name: ansible_sftp_extra_args
env:
- name: ANSIBLE_SFTP_EXTRA_ARGS
version_added: '2.7'
ini:
- key: sftp_extra_args
section: ssh_connection
version_added: '2.7'
cli:
- name: sftp_extra_args
default: ''
ssh_extra_args:
      description: Extra arguments exclusive to the SSH CLI.
vars:
- name: ansible_ssh_extra_args
env:
- name: ANSIBLE_SSH_EXTRA_ARGS
version_added: '2.7'
ini:
- key: ssh_extra_args
section: ssh_connection
version_added: '2.7'
cli:
- name: ssh_extra_args
default: ''
reconnection_retries:
description: Number of attempts to connect.
default: 0
type: integer
env:
- name: ANSIBLE_SSH_RETRIES
ini:
- section: connection
key: retries
- section: ssh_connection
key: retries
vars:
- name: ansible_ssh_retries
version_added: '2.7'
port:
description: Remote port to connect to.
type: int
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_port
- name: ansible_ssh_port
remote_user:
description:
- User name with which to login to the remote server, normally set by the remote_user keyword.
        - If no user is supplied, Ansible lets the SSH client binary choose the user, as it normally would.
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
- name: ansible_ssh_user
cli:
- name: user
pipelining:
env:
- name: ANSIBLE_PIPELINING
- name: ANSIBLE_SSH_PIPELINING
ini:
- section: defaults
key: pipelining
- section: connection
key: pipelining
- section: ssh_connection
key: pipelining
vars:
- name: ansible_pipelining
- name: ansible_ssh_pipelining
private_key_file:
description:
- Path to private key file to use for authentication.
ini:
- section: defaults
key: private_key_file
env:
- name: ANSIBLE_PRIVATE_KEY_FILE
vars:
- name: ansible_private_key_file
- name: ansible_ssh_private_key_file
cli:
- name: private_key_file
option: '--private-key'
control_path:
description:
        - This is the location to save SSH's ControlPath sockets; it uses SSH's variable substitution.
        - Since 2.3, if null (default), Ansible will generate a unique hash. Use ``%(directory)s`` to indicate where to use the control dir path setting.
- Before 2.3 it defaulted to ``control_path=%(directory)s/ansible-ssh-%%h-%%p-%%r``.
- Be aware that this setting is ignored if C(-o ControlPath) is set in ssh args.
env:
- name: ANSIBLE_SSH_CONTROL_PATH
ini:
- key: control_path
section: ssh_connection
vars:
- name: ansible_control_path
version_added: '2.7'
control_path_dir:
default: ~/.ansible/cp
description:
- This sets the directory to use for ssh control path if the control path setting is null.
        - It also provides the ``%(directory)s`` variable for the control path setting.
env:
- name: ANSIBLE_SSH_CONTROL_PATH_DIR
ini:
- section: ssh_connection
key: control_path_dir
vars:
- name: ansible_control_path_dir
version_added: '2.7'
sftp_batch_mode:
default: 'yes'
      description: When enabled, C(sftp) is run in batch mode (C(-b -)), which allows failed transfers to be detected reliably.
env: [{name: ANSIBLE_SFTP_BATCH_MODE}]
ini:
- {key: sftp_batch_mode, section: ssh_connection}
type: bool
vars:
- name: ansible_sftp_batch_mode
version_added: '2.7'
ssh_transfer_method:
description:
- "Preferred method to use when transferring files over ssh"
- Setting to 'smart' (default) will try them in order, until one succeeds or they all fail
- Using 'piped' creates an ssh pipe with C(dd) on either side to copy the data
choices: ['sftp', 'scp', 'piped', 'smart']
env: [{name: ANSIBLE_SSH_TRANSFER_METHOD}]
ini:
- {key: transfer_method, section: ssh_connection}
vars:
- name: ansible_ssh_transfer_method
version_added: '2.12'
scp_if_ssh:
deprecated:
why: In favor of the "ssh_transfer_method" option.
version: "2.17"
alternatives: ssh_transfer_method
default: smart
description:
- "Preferred method to use when transfering files over SSH."
- When set to I(smart), Ansible will try them until one succeeds or they all fail.
- If set to I(True), it will force 'scp', if I(False) it will use 'sftp'.
- This setting will overridden by ssh_transfer_method if set.
env: [{name: ANSIBLE_SCP_IF_SSH}]
ini:
- {key: scp_if_ssh, section: ssh_connection}
vars:
- name: ansible_scp_if_ssh
version_added: '2.7'
use_tty:
version_added: '2.5'
default: 'yes'
description: add -tt to ssh commands to force tty allocation.
env: [{name: ANSIBLE_SSH_USETTY}]
ini:
- {key: usetty, section: ssh_connection}
type: bool
vars:
- name: ansible_ssh_use_tty
version_added: '2.7'
timeout:
default: 10
description:
        - This is the default amount of time we will wait while establishing an SSH connection.
        - It also controls how long we can wait when reading from the connection once it is established (the select timeout on the socket).
env:
- name: ANSIBLE_TIMEOUT
- name: ANSIBLE_SSH_TIMEOUT
version_added: '2.11'
ini:
- key: timeout
section: defaults
- key: timeout
section: ssh_connection
version_added: '2.11'
vars:
- name: ansible_ssh_timeout
version_added: '2.11'
cli:
- name: timeout
type: integer
pkcs11_provider:
version_added: '2.12'
default: ""
description:
- "PKCS11 SmartCard provider such as opensc, example: /usr/local/lib/opensc-pkcs11.so"
        - Requires sshpass version 1.06+; sshpass must support the -P option.
env: [{name: ANSIBLE_PKCS11_PROVIDER}]
ini:
- {key: pkcs11_provider, section: ssh_connection}
vars:
- name: ansible_ssh_pkcs11_provider
'''
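# Illustrative only (not part of the original plugin): typical inventory
# variables that map onto the options documented above, e.g.
#   ansible_ssh_args: '-o ControlMaster=auto -o ControlPersist=120s'
#   ansible_ssh_private_key_file: ~/.ssh/id_ed25519
#   ansible_ssh_transfer_method: sftp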
import errno
import fcntl
import hashlib
import os
import pty
import re
import shlex
import subprocess
import time
from functools import wraps
from ansible.errors import (
AnsibleAuthenticationFailure,
AnsibleConnectionFailure,
AnsibleError,
AnsibleFileNotFound,
)
from ansible.errors import AnsibleOptionsError
from ansible.module_utils.compat import selectors
from ansible.module_utils.six import PY3, text_type, binary_type
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.parsing.convert_bool import BOOLEANS, boolean
from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.plugins.shell.powershell import _parse_clixml
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath, makedirs_safe
display = Display()
b_NOT_SSH_ERRORS = (b'Traceback (most recent call last):', # Python-2.6 when there's an exception
# while invoking a script via -m
b'PHP Parse error:', # Php always returns error 255
)
SSHPASS_AVAILABLE = None
class AnsibleControlPersistBrokenPipeError(AnsibleError):
''' ControlPersist broken pipe '''
pass
def _handle_error(remaining_retries, command, return_tuple, no_log, host, display=display):
# sshpass errors
if command == b'sshpass':
# Error 5 is invalid/incorrect password. Raise an exception to prevent retries from locking the account.
if return_tuple[0] == 5:
msg = 'Invalid/incorrect username/password. Skipping remaining {0} retries to prevent account lockout:'.format(remaining_retries)
if remaining_retries <= 0:
msg = 'Invalid/incorrect password:'
if no_log:
msg = '{0} <error censored due to no log>'.format(msg)
else:
msg = '{0} {1}'.format(msg, to_native(return_tuple[2]).rstrip())
raise AnsibleAuthenticationFailure(msg)
    # sshpass return codes are 1-6. We handled 5 above, so this catches other scenarios.
# No exception is raised, so the connection is retried - except when attempting to use
# sshpass_prompt with an sshpass that won't let us pass -P, in which case we fail loudly.
elif return_tuple[0] in [1, 2, 3, 4, 6]:
msg = 'sshpass error:'
if no_log:
msg = '{0} <error censored due to no log>'.format(msg)
else:
details = to_native(return_tuple[2]).rstrip()
if "sshpass: invalid option -- 'P'" in details:
details = 'Installed sshpass version does not support customized password prompts. ' \
'Upgrade sshpass to use sshpass_prompt, or otherwise switch to ssh keys.'
raise AnsibleError('{0} {1}'.format(msg, details))
msg = '{0} {1}'.format(msg, details)
if return_tuple[0] == 255:
SSH_ERROR = True
for signature in b_NOT_SSH_ERRORS:
if signature in return_tuple[1]:
SSH_ERROR = False
break
if SSH_ERROR:
msg = "Failed to connect to the host via ssh:"
if no_log:
msg = '{0} <error censored due to no log>'.format(msg)
else:
msg = '{0} {1}'.format(msg, to_native(return_tuple[2]).rstrip())
raise AnsibleConnectionFailure(msg)
# For other errors, no exception is raised so the connection is retried and we only log the messages
if 1 <= return_tuple[0] <= 254:
msg = u"Failed to connect to the host via ssh:"
if no_log:
msg = u'{0} <error censored due to no log>'.format(msg)
else:
msg = u'{0} {1}'.format(msg, to_text(return_tuple[2]).rstrip())
display.vvv(msg, host=host)
def _ssh_retry(func):
"""
Decorator to retry ssh/scp/sftp in the case of a connection failure
Will retry if:
* an exception is caught
* ssh returns 255
Will not retry if
* sshpass returns 5 (invalid password, to prevent account lockouts)
* remaining_tries is < 2
* retries limit reached
"""
@wraps(func)
def wrapped(self, *args, **kwargs):
remaining_tries = int(self.get_option('reconnection_retries')) + 1
cmd_summary = u"%s..." % to_text(args[0])
conn_password = self.get_option('password') or self._play_context.password
for attempt in range(remaining_tries):
cmd = args[0]
if attempt != 0 and conn_password and isinstance(cmd, list):
# If this is a retry, the fd/pipe for sshpass is closed, and we need a new one
self.sshpass_pipe = os.pipe()
cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')
try:
try:
return_tuple = func(self, *args, **kwargs)
# TODO: this should come from task
if self._play_context.no_log:
display.vvv(u'rc=%s, stdout and stderr censored due to no log' % return_tuple[0], host=self.host)
else:
display.vvv(return_tuple, host=self.host)
# 0 = success
# 1-254 = remote command return code
# 255 could be a failure from the ssh command itself
except (AnsibleControlPersistBrokenPipeError):
# Retry one more time because of the ControlPersist broken pipe (see #16731)
cmd = args[0]
if conn_password and isinstance(cmd, list):
# This is a retry, so the fd/pipe for sshpass is closed, and we need a new one
self.sshpass_pipe = os.pipe()
cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')
display.vvv(u"RETRYING BECAUSE OF CONTROLPERSIST BROKEN PIPE")
return_tuple = func(self, *args, **kwargs)
remaining_retries = remaining_tries - attempt - 1
_handle_error(remaining_retries, cmd[0], return_tuple, self._play_context.no_log, self.host)
break
# 5 = Invalid/incorrect password from sshpass
except AnsibleAuthenticationFailure:
# Raising this exception, which is subclassed from AnsibleConnectionFailure, prevents further retries
raise
except (AnsibleConnectionFailure, Exception) as e:
if attempt == remaining_tries - 1:
raise
else:
pause = 2 ** attempt - 1
if pause > 30:
pause = 30
if isinstance(e, AnsibleConnectionFailure):
msg = u"ssh_retry: attempt: %d, ssh return code is 255. cmd (%s), pausing for %d seconds" % (attempt + 1, cmd_summary, pause)
else:
msg = (u"ssh_retry: attempt: %d, caught exception(%s) from cmd (%s), "
u"pausing for %d seconds" % (attempt + 1, to_text(e), cmd_summary, pause))
display.vv(msg, host=self.host)
time.sleep(pause)
continue
return return_tuple
return wrapped
class Connection(ConnectionBase):
''' ssh based connections '''
transport = 'ssh'
has_pipelining = True
def __init__(self, *args, **kwargs):
super(Connection, self).__init__(*args, **kwargs)
        # TODO: all should come from get_option(), but might not be set at this point yet
self.host = self._play_context.remote_addr
self.port = self._play_context.port
self.user = self._play_context.remote_user
self.control_path = None
self.control_path_dir = None
# Windows operates differently from a POSIX connection/shell plugin,
# we need to set various properties to ensure SSH on Windows continues
# to work
if getattr(self._shell, "_IS_WINDOWS", False):
self.has_native_async = True
self.always_pipeline_modules = True
self.module_implementation_preferences = ('.ps1', '.exe', '')
self.allow_executable = False
# The connection is created by running ssh/scp/sftp from the exec_command,
# put_file, and fetch_file methods, so we don't need to do any connection
# management here.
def _connect(self):
return self
@staticmethod
def _create_control_path(host, port, user, connection=None, pid=None):
'''Make a hash for the controlpath based on con attributes'''
pstring = '%s-%s-%s' % (host, port, user)
if connection:
pstring += '-%s' % connection
if pid:
pstring += '-%s' % to_text(pid)
m = hashlib.sha1()
m.update(to_bytes(pstring))
digest = m.hexdigest()
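        # Only the first 10 hex characters of the digest are kept below; this
        # keeps the socket path short, since Unix domain socket paths have a
        # tight length limit (roughly 104-108 bytes on most platforms). For
        # example, host='web1', port=22, user='deploy' hashes sha1('web1-22-deploy').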
cpath = '%(directory)s/' + digest[:10]
return cpath
@staticmethod
def _sshpass_available():
global SSHPASS_AVAILABLE
        # We test once if sshpass is available, and remember the result. It
        # would be nice to use distutils.spawn.find_executable for this, but
        # distutils isn't always available; shutil.which() is Python3-only.
if SSHPASS_AVAILABLE is None:
try:
p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
SSHPASS_AVAILABLE = True
except OSError:
SSHPASS_AVAILABLE = False
return SSHPASS_AVAILABLE
@staticmethod
def _persistence_controls(b_command):
'''
Takes a command array and scans it for ControlPersist and ControlPath
settings and returns two booleans indicating whether either was found.
        This could be smarter, e.g. returning false if ControlPersist is 'no',
        but for now we do it the simple way.
'''
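        # Illustrative only (not from the original source): for
        #   [b'ssh', b'-o', b'ControlMaster=auto', b'-o', b'ControlPersist=60s']
        # this returns (True, False): persistence is requested but no explicit
        # ControlPath was given, so _build_command will add one.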
controlpersist = False
controlpath = False
for b_arg in (a.lower() for a in b_command):
if b'controlpersist' in b_arg:
controlpersist = True
elif b'controlpath' in b_arg:
controlpath = True
return controlpersist, controlpath
def _add_args(self, b_command, b_args, explanation):
"""
Adds arguments to the ssh command and displays a caller-supplied explanation of why.
:arg b_command: A list containing the command to add the new arguments to.
This list will be modified by this method.
        :arg b_args: An iterable of new arguments to add. This iterable is used
            more than once so it must be persistent (i.e. a list is okay but a
            one-shot generator is not)
        :arg explanation: A text string explaining why the arguments were
            added. It is displayed only at a high enough verbosity.
.. note:: This function does its work via side-effect. The b_command list has the new arguments appended.
"""
display.vvvvv(u'SSH: %s: (%s)' % (explanation, ')('.join(to_text(a) for a in b_args)), host=self.host)
b_command += b_args
def _build_command(self, binary, subsystem, *other_args):
'''
        Takes an executable (ssh, scp, sftp or wrapper) and optional extra arguments and returns the remote command
        wrapped in local ssh shell commands and ready for execution.
        :arg binary: actual executable to use to execute command.
        :arg subsystem: type of executable provided, ssh/sftp/scp, needed because wrappers for ssh might have different names.
        :arg other_args: any additional arguments to append to the resulting command.
'''
b_command = []
conn_password = self.get_option('password') or self._play_context.password
#
# First, the command to invoke
#
# If we want to use password authentication, we have to set up a pipe to
# write the password to sshpass.
pkcs11_provider = self.get_option("pkcs11_provider")
if conn_password or pkcs11_provider:
if not self._sshpass_available():
raise AnsibleError("to use the 'ssh' connection type with passwords or pkcs11_provider, you must install the sshpass program")
if not conn_password and pkcs11_provider:
raise AnsibleError("to use pkcs11_provider you must specify a password/pin")
self.sshpass_pipe = os.pipe()
b_command += [b'sshpass', b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')]
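            # sshpass -d <fd> tells sshpass to read the password from the
            # given file descriptor; _bare_run later writes the password into
            # the other end of this pipe, so it never appears on the command line.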
password_prompt = self.get_option('sshpass_prompt')
if not password_prompt and pkcs11_provider:
                # Set default password prompt for pkcs11_provider to make it clear it's a PIN
password_prompt = 'Enter PIN for '
if password_prompt:
b_command += [b'-P', to_bytes(password_prompt, errors='surrogate_or_strict')]
b_command += [to_bytes(binary, errors='surrogate_or_strict')]
#
# Next, additional arguments based on the configuration.
#
# pkcs11 mode allows the use of Smartcards or Yubikey devices
if conn_password and pkcs11_provider:
self._add_args(b_command,
(b"-o", b"KbdInteractiveAuthentication=no",
b"-o", b"PreferredAuthentications=publickey",
b"-o", b"PasswordAuthentication=no",
b'-o', to_bytes(u'PKCS11Provider=%s' % pkcs11_provider)),
u'Enable pkcs11')
# sftp batch mode allows us to correctly catch failed transfers, but can
# be disabled if the client side doesn't support the option. However,
# sftp batch mode does not prompt for passwords so it must be disabled
# if not using controlpersist and using sshpass
if subsystem == 'sftp' and self.get_option('sftp_batch_mode'):
if conn_password:
b_args = [b'-o', b'BatchMode=no']
self._add_args(b_command, b_args, u'disable batch mode for sshpass')
b_command += [b'-b', b'-']
if self._play_context.verbosity > 3:
b_command.append(b'-vvv')
# Next, we add ssh_args
ssh_args = self.get_option('ssh_args')
if ssh_args:
b_args = [to_bytes(a, errors='surrogate_or_strict') for a in
self._split_ssh_args(ssh_args)]
self._add_args(b_command, b_args, u"ansible.cfg set ssh_args")
# Now we add various arguments that have their own specific settings defined in docs above.
if self.get_option('host_key_checking') is False:
b_args = (b"-o", b"StrictHostKeyChecking=no")
self._add_args(b_command, b_args, u"ANSIBLE_HOST_KEY_CHECKING/host_key_checking disabled")
self.port = self.get_option('port')
if self.port is not None:
b_args = (b"-o", b"Port=" + to_bytes(self.port, nonstring='simplerepr', errors='surrogate_or_strict'))
self._add_args(b_command, b_args, u"ANSIBLE_REMOTE_PORT/remote_port/ansible_port set")
key = self.get_option('private_key_file')
if key:
b_args = (b"-o", b'IdentityFile="' + to_bytes(os.path.expanduser(key), errors='surrogate_or_strict') + b'"')
self._add_args(b_command, b_args, u"ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set")
if not conn_password:
self._add_args(
b_command, (
b"-o", b"KbdInteractiveAuthentication=no",
b"-o", b"PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
b"-o", b"PasswordAuthentication=no"
),
u"ansible_password/ansible_ssh_password not set"
)
self.user = self.get_option('remote_user')
if self.user:
self._add_args(
b_command,
(b"-o", b'User="%s"' % to_bytes(self.user, errors='surrogate_or_strict')),
u"ANSIBLE_REMOTE_USER/remote_user/ansible_user/user/-u set"
)
timeout = self.get_option('timeout')
self._add_args(
b_command,
(b"-o", b"ConnectTimeout=" + to_bytes(timeout, errors='surrogate_or_strict', nonstring='simplerepr')),
u"ANSIBLE_TIMEOUT/timeout set"
)
# Add in any common or binary-specific arguments from the PlayContext
# (i.e. inventory or task settings or overrides on the command line).
for opt in (u'ssh_common_args', u'{0}_extra_args'.format(subsystem)):
attr = self.get_option(opt)
if attr is not None:
b_args = [to_bytes(a, errors='surrogate_or_strict') for a in self._split_ssh_args(attr)]
self._add_args(b_command, b_args, u"Set %s" % opt)
# Check if ControlPersist is enabled and add a ControlPath if one hasn't
# already been set.
controlpersist, controlpath = self._persistence_controls(b_command)
if controlpersist:
self._persistent = True
if not controlpath:
self.control_path_dir = self.get_option('control_path_dir')
cpdir = unfrackpath(self.control_path_dir)
b_cpdir = to_bytes(cpdir, errors='surrogate_or_strict')
# The directory must exist and be writable.
makedirs_safe(b_cpdir, 0o700)
if not os.access(b_cpdir, os.W_OK):
raise AnsibleError("Cannot write to ControlPath %s" % to_native(cpdir))
self.control_path = self.get_option('control_path')
if not self.control_path:
self.control_path = self._create_control_path(
self.host,
self.port,
self.user
)
b_args = (b"-o", b'ControlPath="%s"' % to_bytes(self.control_path % dict(directory=cpdir), errors='surrogate_or_strict'))
self._add_args(b_command, b_args, u"found only ControlPersist; added ControlPath")
# Finally, we add any caller-supplied extras.
if other_args:
b_command += [to_bytes(a) for a in other_args]
return b_command
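    # Illustrative only (not part of the original plugin): with default
    # options and key-based auth, the command built above resembles
    #   ssh -C -o ControlMaster=auto -o ControlPersist=60s
    #       -o KbdInteractiveAuthentication=no -o PasswordAuthentication=no
    #       -o ConnectTimeout=10 -o ControlPath=<dir>/<hash> <other_args...>
    # with sshpass prepended when password authentication is in use.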
def _send_initial_data(self, fh, in_data, ssh_process):
'''
Writes initial data to the stdin filehandle of the subprocess and closes
it. (The handle must be closed; otherwise, for example, "sftp -b -" will
just hang forever waiting for more commands.)
'''
display.debug(u'Sending initial data')
try:
fh.write(to_bytes(in_data))
fh.close()
except (OSError, IOError) as e:
# The ssh connection may have already terminated at this point, with a more useful error
# Only raise AnsibleConnectionFailure if the ssh process is still alive
time.sleep(0.001)
ssh_process.poll()
if getattr(ssh_process, 'returncode', None) is None:
raise AnsibleConnectionFailure(
'Data could not be sent to remote host "%s". Make sure this host can be reached '
'over ssh: %s' % (self.host, to_native(e)), orig_exc=e
)
display.debug(u'Sent initial data (%d bytes)' % len(in_data))
# Used by _run() to kill processes on failures
@staticmethod
def _terminate_process(p):
""" Terminate a process, ignoring errors """
try:
p.terminate()
except (OSError, IOError):
pass
# This is separate from _run() because we need to do the same thing for stdout
# and stderr.
def _examine_output(self, source, state, b_chunk, sudoable):
'''
Takes a string, extracts complete lines from it, tests to see if they
are a prompt, error message, etc., and sets appropriate flags in self.
Prompt and success lines are removed.
Returns the processed (i.e. possibly-edited) output and the unprocessed
remainder (to be processed with the next chunk) as strings.
'''
output = []
for b_line in b_chunk.splitlines(True):
display_line = to_text(b_line).rstrip('\r\n')
suppress_output = False
# display.debug("Examining line (source=%s, state=%s): '%s'" % (source, state, display_line))
if self.become.expect_prompt() and self.become.check_password_prompt(b_line):
display.debug(u"become_prompt: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_prompt'] = True
suppress_output = True
elif self.become.success and self.become.check_success(b_line):
display.debug(u"become_success: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_success'] = True
suppress_output = True
elif sudoable and self.become.check_incorrect_password(b_line):
display.debug(u"become_error: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_error'] = True
elif sudoable and self.become.check_missing_password(b_line):
display.debug(u"become_nopasswd_error: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_nopasswd_error'] = True
if not suppress_output:
output.append(b_line)
# The chunk we read was most likely a series of complete lines, but just
# in case the last line was incomplete (and not a prompt, which we would
# have removed from the output), we retain it to be processed with the
# next chunk.
remainder = b''
if output and not output[-1].endswith(b'\n'):
remainder = output[-1]
output = output[:-1]
return b''.join(output), remainder
def _bare_run(self, cmd, in_data, sudoable=True, checkrc=True):
'''
Starts the command and communicates with it until it ends.
'''
# We don't use _shell.quote as this is run on the controller and independent from the shell plugin chosen
display_cmd = u' '.join(shlex.quote(to_text(c)) for c in cmd)
display.vvv(u'SSH: EXEC {0}'.format(display_cmd), host=self.host)
# Start the given command. If we don't need to pipeline data, we can try
# to use a pseudo-tty (ssh will have been invoked with -tt). If we are
# pipelining data, or can't create a pty, we fall back to using plain
# old pipes.
p = None
if isinstance(cmd, (text_type, binary_type)):
cmd = to_bytes(cmd)
else:
cmd = list(map(to_bytes, cmd))
conn_password = self.get_option('password') or self._play_context.password
if not in_data:
try:
# Make sure stdin is a proper pty to avoid tcgetattr errors
master, slave = pty.openpty()
if PY3 and conn_password:
# pylint: disable=unexpected-keyword-arg
p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe)
else:
p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdin = os.fdopen(master, 'wb', 0)
os.close(slave)
except (OSError, IOError):
p = None
if not p:
try:
if PY3 and conn_password:
# pylint: disable=unexpected-keyword-arg
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe)
else:
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdin = p.stdin
except (OSError, IOError) as e:
raise AnsibleError('Unable to execute ssh command line on a controller due to: %s' % to_native(e))
# If we are using SSH password authentication, write the password into
# the pipe we opened in _build_command.
if conn_password:
os.close(self.sshpass_pipe[0])
try:
os.write(self.sshpass_pipe[1], to_bytes(conn_password) + b'\n')
except OSError as e:
# Ignore broken pipe errors if the sshpass process has exited.
if e.errno != errno.EPIPE or p.poll() is None:
raise
os.close(self.sshpass_pipe[1])
#
# SSH state machine
#
# Now we read and accumulate output from the running process until it
# exits. Depending on the circumstances, we may also need to write an
# escalation password and/or pipelined input to the process.
states = [
'awaiting_prompt', 'awaiting_escalation', 'ready_to_send', 'awaiting_exit'
]
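        # Illustrative only (not from the original source): the state index
        # only ever advances, e.g. for a sudo-with-password task:
        #   awaiting_prompt -> awaiting_escalation -> ready_to_send -> awaiting_exit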
# Are we requesting privilege escalation? Right now, we may be invoked
# to execute sftp/scp with sudoable=True, but we can request escalation
# only when using ssh. Otherwise we can send initial data straightaway.
state = states.index('ready_to_send')
if to_bytes(self.get_option('ssh_executable')) in cmd and sudoable:
prompt = getattr(self.become, 'prompt', None)
if prompt:
# We're requesting escalation with a password, so we have to
# wait for a password prompt.
state = states.index('awaiting_prompt')
display.debug(u'Initial state: %s: %s' % (states[state], to_text(prompt)))
elif self.become and self.become.success:
# We're requesting escalation without a password, so we have to
# detect success/failure before sending any initial data.
state = states.index('awaiting_escalation')
display.debug(u'Initial state: %s: %s' % (states[state], to_text(self.become.success)))
# We store accumulated stdout and stderr output from the process here,
# but strip any privilege escalation prompt/confirmation lines first.
# Output is accumulated into tmp_*, complete lines are extracted into
# an array, then checked and removed or copied to stdout or stderr. We
# set any flags based on examining the output in self._flags.
b_stdout = b_stderr = b''
b_tmp_stdout = b_tmp_stderr = b''
self._flags = dict(
become_prompt=False, become_success=False,
become_error=False, become_nopasswd_error=False
)
# select timeout should be longer than the connect timeout, otherwise
# they will race each other when we can't connect, and the connect
# timeout usually fails
timeout = 2 + self.get_option('timeout')
for fd in (p.stdout, p.stderr):
fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        # TODO: bcoca would like to use SelectSelector() when the number of open
        # filehandles is low; plain select is faster in that case and we only ever handle a couple.
selector = selectors.DefaultSelector()
selector.register(p.stdout, selectors.EVENT_READ)
selector.register(p.stderr, selectors.EVENT_READ)
# If we can send initial data without waiting for anything, we do so
# before we start polling
if states[state] == 'ready_to_send' and in_data:
self._send_initial_data(stdin, in_data, p)
state += 1
try:
while True:
poll = p.poll()
events = selector.select(timeout)
# We pay attention to timeouts only while negotiating a prompt.
if not events:
# We timed out
if state <= states.index('awaiting_escalation'):
# If the process has already exited, then it's not really a
# timeout; we'll let the normal error handling deal with it.
if poll is not None:
break
self._terminate_process(p)
raise AnsibleError('Timeout (%ds) waiting for privilege escalation prompt: %s' % (timeout, to_native(b_stdout)))
# Read whatever output is available on stdout and stderr, and stop
# listening to the pipe if it's been closed.
for key, event in events:
if key.fileobj == p.stdout:
b_chunk = p.stdout.read()
if b_chunk == b'':
# stdout has been closed, stop watching it
selector.unregister(p.stdout)
# When ssh has ControlMaster (+ControlPath/Persist) enabled, the
# first connection goes into the background and we never see EOF
# on stderr. If we see EOF on stdout, lower the select timeout
# to reduce the time wasted selecting on stderr if we observe
                            # that the process has not yet exited after this EOF. Otherwise
# we may spend a long timeout period waiting for an EOF that is
# not going to arrive until the persisted connection closes.
timeout = 1
b_tmp_stdout += b_chunk
display.debug(u"stdout chunk (state=%s):\n>>>%s<<<\n" % (state, to_text(b_chunk)))
elif key.fileobj == p.stderr:
b_chunk = p.stderr.read()
if b_chunk == b'':
# stderr has been closed, stop watching it
selector.unregister(p.stderr)
b_tmp_stderr += b_chunk
display.debug("stderr chunk (state=%s):\n>>>%s<<<\n" % (state, to_text(b_chunk)))
# We examine the output line-by-line until we have negotiated any
# privilege escalation prompt and subsequent success/error message.
# Afterwards, we can accumulate output without looking at it.
if state < states.index('ready_to_send'):
if b_tmp_stdout:
b_output, b_unprocessed = self._examine_output('stdout', states[state], b_tmp_stdout, sudoable)
b_stdout += b_output
b_tmp_stdout = b_unprocessed
if b_tmp_stderr:
b_output, b_unprocessed = self._examine_output('stderr', states[state], b_tmp_stderr, sudoable)
b_stderr += b_output
b_tmp_stderr = b_unprocessed
else:
b_stdout += b_tmp_stdout
b_stderr += b_tmp_stderr
b_tmp_stdout = b_tmp_stderr = b''
# If we see a privilege escalation prompt, we send the password.
# (If we're expecting a prompt but the escalation succeeds, we
# didn't need the password and can carry on regardless.)
if states[state] == 'awaiting_prompt':
if self._flags['become_prompt']:
display.debug(u'Sending become_password in response to prompt')
become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
# On python3 stdin is a BufferedWriter, and we don't have a guarantee
# that the write will happen without a flush
stdin.flush()
self._flags['become_prompt'] = False
state += 1
elif self._flags['become_success']:
state += 1
# We've requested escalation (with or without a password), now we
# wait for an error message or a successful escalation.
if states[state] == 'awaiting_escalation':
if self._flags['become_success']:
display.vvv(u'Escalation succeeded')
self._flags['become_success'] = False
state += 1
elif self._flags['become_error']:
display.vvv(u'Escalation failed')
self._terminate_process(p)
self._flags['become_error'] = False
raise AnsibleError('Incorrect %s password' % self.become.name)
elif self._flags['become_nopasswd_error']:
display.vvv(u'Escalation requires password')
self._terminate_process(p)
self._flags['become_nopasswd_error'] = False
raise AnsibleError('Missing %s password' % self.become.name)
elif self._flags['become_prompt']:
# This shouldn't happen, because we should see the "Sorry,
# try again" message first.
display.vvv(u'Escalation prompt repeated')
self._terminate_process(p)
self._flags['become_prompt'] = False
raise AnsibleError('Incorrect %s password' % self.become.name)
# Once we're sure that the privilege escalation prompt, if any, has
# been dealt with, we can send any initial data and start waiting
# for output.
if states[state] == 'ready_to_send':
if in_data:
self._send_initial_data(stdin, in_data, p)
state += 1
# Now we're awaiting_exit: has the child process exited? If it has,
# and we've read all available output from it, we're done.
if poll is not None:
if not selector.get_map() or not events:
break
                    # We should not see further writes to the stdout/stderr file
                    # descriptors after the process has exited; set the select
                    # timeout to 0 to gather any last writes we may have missed.
timeout = 0
continue
# If the process has not yet exited, but we've already read EOF from
# its stdout and stderr (and thus no longer watching any file
# descriptors), we can just wait for it to exit.
elif not selector.get_map():
p.wait()
break
# Otherwise there may still be outstanding data to read.
finally:
selector.close()
# close stdin, stdout, and stderr after process is terminated and
# stdout/stderr are read completely (see also issues #848, #64768).
stdin.close()
p.stdout.close()
p.stderr.close()
if self.get_option('host_key_checking'):
if cmd[0] == b"sshpass" and p.returncode == 6:
                raise AnsibleError('Using an SSH password instead of a key is not possible because host key checking is enabled and sshpass does not support '
                                   'this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
controlpersisterror = b'Bad configuration option: ControlPersist' in b_stderr or b'unknown configuration option: ControlPersist' in b_stderr
if p.returncode != 0 and controlpersisterror:
raise AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" '
'(or ssh_args in [ssh_connection] section of the config file) before running again')
# If we find a broken pipe because of ControlPersist timeout expiring (see #16731),
# we raise a special exception so that we can retry a connection.
controlpersist_broken_pipe = b'mux_client_hello_exchange: write packet: Broken pipe' in b_stderr
if p.returncode == 255:
additional = to_native(b_stderr)
if controlpersist_broken_pipe:
raise AnsibleControlPersistBrokenPipeError('Data could not be sent because of ControlPersist broken pipe: %s' % additional)
elif in_data and checkrc:
raise AnsibleConnectionFailure('Data could not be sent to remote host "%s". Make sure this host can be reached over ssh: %s'
% (self.host, additional))
return (p.returncode, b_stdout, b_stderr)
@_ssh_retry
def _run(self, cmd, in_data, sudoable=True, checkrc=True):
"""Wrapper around _bare_run that retries the connection
"""
return self._bare_run(cmd, in_data, sudoable=sudoable, checkrc=checkrc)
@_ssh_retry
def _file_transport_command(self, in_path, out_path, sftp_action):
# scp and sftp require square brackets for IPv6 addresses, but
# accept them for hostnames and IPv4 addresses too.
host = '[%s]' % self.host
smart_methods = ['sftp', 'scp', 'piped']
# Windows does not support dd so we cannot use the piped method
if getattr(self._shell, "_IS_WINDOWS", False):
smart_methods.remove('piped')
# Transfer methods to try
methods = []
# Use the transfer_method option if set, otherwise use scp_if_ssh
ssh_transfer_method = self.get_option('ssh_transfer_method')
scp_if_ssh = self.get_option('scp_if_ssh')
if ssh_transfer_method is None and scp_if_ssh == 'smart':
ssh_transfer_method = 'smart'
if ssh_transfer_method is not None:
if ssh_transfer_method == 'smart':
methods = smart_methods
else:
methods = [ssh_transfer_method]
else:
# since this can be a non-bool now, we need to handle it correctly
if not isinstance(scp_if_ssh, bool):
scp_if_ssh = scp_if_ssh.lower()
if scp_if_ssh in BOOLEANS:
scp_if_ssh = boolean(scp_if_ssh, strict=False)
elif scp_if_ssh != 'smart':
raise AnsibleOptionsError('scp_if_ssh needs to be one of [smart|True|False]')
if scp_if_ssh == 'smart':
methods = smart_methods
elif scp_if_ssh is True:
methods = ['scp']
else:
methods = ['sftp']
for method in methods:
returncode = stdout = stderr = None
if method == 'sftp':
cmd = self._build_command(self.get_option('sftp_executable'), 'sftp', to_bytes(host))
in_data = u"{0} {1} {2}\n".format(sftp_action, shlex.quote(in_path), shlex.quote(out_path))
in_data = to_bytes(in_data, nonstring='passthru')
(returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
elif method == 'scp':
scp = self.get_option('scp_executable')
if sftp_action == 'get':
cmd = self._build_command(scp, 'scp', u'{0}:{1}'.format(host, self._shell.quote(in_path)), out_path)
else:
cmd = self._build_command(scp, 'scp', in_path, u'{0}:{1}'.format(host, self._shell.quote(out_path)))
in_data = None
(returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
elif method == 'piped':
if sftp_action == 'get':
# we pass sudoable=False to disable pty allocation, which
# would end up mixing stdout/stderr and screwing with newlines
(returncode, stdout, stderr) = self.exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), sudoable=False)
with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
out_file.write(stdout)
else:
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as f:
in_data = to_bytes(f.read(), nonstring='passthru')
if not in_data:
count = ' count=0'
else:
count = ''
(returncode, stdout, stderr) = self.exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), in_data=in_data, sudoable=False)
# Check the return code and rollover to next method if failed
if returncode == 0:
return (returncode, stdout, stderr)
else:
# If not in smart mode, the data will be printed by the raise below
if len(methods) > 1:
display.warning(u'%s transfer mechanism failed on %s. Use ANSIBLE_DEBUG=1 to see detailed information' % (method, host))
display.debug(u'%s' % to_text(stdout))
display.debug(u'%s' % to_text(stderr))
if returncode == 255:
raise AnsibleConnectionFailure("Failed to connect to the host via %s: %s" % (method, to_native(stderr)))
else:
raise AnsibleError("failed to transfer file to %s %s:\n%s\n%s" %
(to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
def _escape_win_path(self, path):
""" converts a Windows path to one that's supported by SFTP and SCP """
# If using a root path then we need to start with /
prefix = ""
if re.match(r'^\w{1}:', path):
prefix = "/"
# Convert all '\' to '/'
return "%s%s" % (prefix, path.replace("\\", "/"))
#
# Main public methods
#
def exec_command(self, cmd, in_data=None, sudoable=True):
''' run a command on the remote host '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
display.vvv(u"ESTABLISH SSH CONNECTION FOR USER: {0}".format(self.user), host=self.host)
if getattr(self._shell, "_IS_WINDOWS", False):
# Become method 'runas' is done in the wrapper that is executed,
# need to disable sudoable so the bare_run is not waiting for a
# prompt that will not occur
sudoable = False
# Make sure our first command is to set the console encoding to
# utf-8, this must be done via chcp to get utf-8 (65001)
cmd_parts = ["chcp.com", "65001", self._shell._SHELL_REDIRECT_ALLNULL, self._shell._SHELL_AND]
cmd_parts.extend(self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False))
cmd = ' '.join(cmd_parts)
# we can only use tty when we are not pipelining the modules. piping
# data into /usr/bin/python inside a tty automatically invokes the
# python interactive-mode but the modules are not compatible with the
# interactive-mode ("unexpected indent" mainly because of empty lines)
ssh_executable = self.get_option('ssh_executable')
# -tt can cause various issues in some environments so allow the user
# to disable it as a troubleshooting method.
use_tty = self.get_option('use_tty')
if not in_data and sudoable and use_tty:
args = ('-tt', self.host, cmd)
else:
args = (self.host, cmd)
cmd = self._build_command(ssh_executable, 'ssh', *args)
(returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable)
# When running on Windows, stderr may contain CLIXML encoded output
if getattr(self._shell, "_IS_WINDOWS", False) and stderr.startswith(b"#< CLIXML"):
stderr = _parse_clixml(stderr)
return (returncode, stdout, stderr)

    def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
super(Connection, self).put_file(in_path, out_path)
display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.host)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_native(in_path)))
if getattr(self._shell, "_IS_WINDOWS", False):
out_path = self._escape_win_path(out_path)
return self._file_transport_command(in_path, out_path, 'put')

    def fetch_file(self, in_path, out_path):
''' fetch a file from remote to local '''
super(Connection, self).fetch_file(in_path, out_path)
display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
# need to add / if path is rooted
if getattr(self._shell, "_IS_WINDOWS", False):
in_path = self._escape_win_path(in_path)
return self._file_transport_command(in_path, out_path, 'get')

    def reset(self):
run_reset = False
# If we have a persistent ssh connection (ControlPersist), we can ask it to stop listening.
# only run the reset if the ControlPath already exists or if it isn't configured and ControlPersist is set
# 'check' will determine this.
cmd = self._build_command(self.get_option('ssh_executable'), 'ssh', '-O', 'check', self.host)
display.vvv(u'sending connection check: %s' % to_text(cmd))
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
status_code = p.wait()
if status_code != 0:
display.vvv(u"No connection to reset: %s" % to_text(stderr))
else:
run_reset = True
if run_reset:
cmd = self._build_command(self.get_option('ssh_executable'), 'ssh', '-O', 'stop', self.host)
display.vvv(u'sending connection stop: %s' % to_text(cmd))
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
status_code = p.wait()
if status_code != 0:
display.warning(u"Failed to reset connection:%s" % to_text(stderr))
self.close()

    def close(self):
self._connected = False
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,834 |
Extra Verbosity fails play
|
### Summary
When I use `become: yes` and run with logging verbosity `-vvv`, everything works perfectly fine. As soon as the verbosity is increased to `-vvvv`, every run fails at the very beginning. Without `become: yes`, even `-vvvv` passes.
### Issue Type
Bug Report
### Component Name
Core
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.5]
config file = None
configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible
ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible
python version = 3.8.10 (default, Jun 2 2021, 10:49:15) [GCC 9.4.0]
jinja version = 3.0.1
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
ANSIBLE_PIPELINING(/workspace/ansible.cfg) = True
CALLBACKS_ENABLED(/workspace/ansible.cfg) = ['ansible.posix.profile_tasks', 'community.general.log_plays']
DEFAULT_FORKS(/workspace/ansible.cfg) = 10
DEFAULT_GATHERING(/workspace/ansible.cfg) = smart
DEFAULT_ROLES_PATH(env: ANSIBLE_ROLES_PATH) = ['/workspace/common-ansible/roles']
INTERPRETER_PYTHON(/workspace/ansible.cfg) = auto
```
### OS / Environment
Ubuntu 20.04.3 LTS
### Steps to Reproduce
```yaml
---
- hosts: gitlab_runners
become: true
tasks:
- debug:
        msg: Success
```
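For reference, the failing and the passing run differ only in the verbosity flag (inventory and playbook paths assumed from the configuration and logs above):
```console
$ ansible-playbook -i inventories/test/hosts.yml test.yml -vvvv   # fails in "Gathering Facts"
$ ansible-playbook -i inventories/test/hosts.yml test.yml -vvv    # passes
```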
### Expected Results
The play should run through, just with a lot of additional debug output.
### Actual Results
Output with `-vvvv`
```console
ansible-playbook [core 2.11.5]
config file = /workspace/ansible.cfg
configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible
ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible-playbook
python version = 3.8.10 (default, Jun 2 2021, 10:49:15) [GCC 9.4.0]
jinja version = 3.0.1
libyaml = True
Using /workspace/ansible.cfg as config file
setting up inventory plugins
host_list declined parsing /workspace/inventories/test/hosts.yml as it did not pass its verify_file() method
script declined parsing /workspace/inventories/test/hosts.yml as it did not pass its verify_file() method
Parsed /workspace/inventories/test/hosts.yml inventory source with yaml plugin
Loading callback plugin default of type stdout, v2.0 from /usr/local/lib/python3.8/dist-packages/ansible/plugins/callback/default.py
Loading collection ansible.posix from /usr/local/lib/python3.8/dist-packages/ansible_collections/ansible/posix
Loading collection community.general from /usr/local/lib/python3.8/dist-packages/ansible_collections/community/general
Skipping callback 'default', as we already have a stdout callback.
Skipping callback 'minimal', as we already have a stdout callback.
Skipping callback 'oneline', as we already have a stdout callback.
Loading callback plugin ansible.posix.profile_tasks of type aggregate, v2.0 from /usr/local/lib/python3.8/dist-packages/ansible_collections/ansible/posix/plugins/callback/profile_tasks.py
Loading callback plugin community.general.log_plays of type notification, v2.0 from /usr/local/lib/python3.8/dist-packages/ansible_collections/community/general/plugins/callback/log_plays.py
PLAYBOOK: test.yml *************************************************************
Positional arguments: test.yml
verbosity: 4
connection: smart
timeout: 10
become_method: sudo
tags: ('all',)
inventory: ('/workspace/inventories/test/hosts.yml',)
forks: 10
1 plays in test.yml
PLAY [gitlab_runners] **********************************************************
Trying secret ScriptVaultSecret(filename='/opt/print_vault_password.py') for vault_id=default
TASK [Gathering Facts] *********************************************************
task path: /workspace/test.yml:2
Thursday 30 September 2021 14:38:16 +0000 (0:00:00.060) 0:00:00.060 ****
Using module file /usr/local/lib/python3.8/dist-packages/ansible/modules/setup.py
Pipelining is enabled.
<10.1.1.201> ESTABLISH SSH CONNECTION FOR USER: manager
<10.1.1.201> SSH: EXEC sshpass -d10 ssh -vvv -C -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no -o 'User="manager"' -o ConnectTimeout=10 -o StrictHostKeyChecking=no -o ControlPath=/root/.ansible/cp/48372ad7a3 10.1.1.201 '/bin/sh -c '"'"'sudo -H -S -p "[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:" -u root /bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-muszsaqzcjtetkenkeiiqhqnnsdqlvmx ; python3'"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
<10.1.1.201> (1, b'', b'OpenSSH_8.2p1 Ubuntu-4ubuntu0.3, OpenSSL 1.1.1f 31 Mar 2020\r\ndebug1: Reading configuration data /etc/ssh/ssh_config\r\ndebug1: /etc/ssh/ssh_config line 19: include /etc/ssh/ssh_config.d/*.conf matched no files\r\ndebug1: /etc/ssh/ssh_config line 21: Applying options for *\r\ndebug2: resolve_canonicalize: hostname 10.1.1.201 is address\r\ndebug1: auto-mux: Trying existing master\r\ndebug1: Control socket "/root/.ansible/cp/48372ad7a3" does not exist\r\ndebug2: ssh_connect_direct\r\ndebug1: Connecting to 10.1.1.201 [10.1.1.201] port 22.\r\ndebug2: fd 3 setting O_NONBLOCK\r\ndebug1: fd 3 clearing O_NONBLOCK\r\ndebug1: Connection established.\r\ndebug3: timeout: 10000 ms remain after connect\r\ndebug1: SELinux support disabled\r\ndebug1: identity file /root/.ssh/id_rsa type -1\r\ndebug1: identity file /root/.ssh/id_rsa-cert type -1\r\ndebug1: identity file /root/.ssh/id_dsa type -1\r\ndebug1: identity file /root/.ssh/id_dsa-cert type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa-cert type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa_sk type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa_sk-cert type -1\r\ndebug1: identity file /root/.ssh/id_ed25519 type -1\r\ndebug1: identity file /root/.ssh/id_ed25519-cert type -1\r\ndebug1: identity file /root/.ssh/id_ed25519_sk type -1\r\ndebug1: identity file /root/.ssh/id_ed25519_sk-cert type -1\r\ndebug1: identity file /root/.ssh/id_xmss type -1\r\ndebug1: identity file /root/.ssh/id_xmss-cert type -1\r\ndebug1: Local version string SSH-2.0-OpenSSH_8.2p1 Ubuntu-4ubuntu0.3\r\ndebug1: Remote protocol version 2.0, remote software version OpenSSH_8.2p1 Ubuntu-4ubuntu0.3\r\ndebug1: match: OpenSSH_8.2p1 Ubuntu-4ubuntu0.3 pat OpenSSH* compat 0x04000000\r\ndebug2: fd 3 setting O_NONBLOCK\r\ndebug1: Authenticating to 10.1.1.201:22 as \'manager\'\r\ndebug3: send packet: type 20\r\ndebug1: SSH2_MSG_KEXINIT sent\r\ndebug3: receive packet: type 20\r\ndebug1: SSH2_MSG_KEXINIT received\r\ndebug2: local client KEXINIT proposal\r\ndebug2: KEX algorithms: curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group14-sha256,ext-info-c\r\ndebug2: host key algorithms: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,[email protected],ssh-ed25519,[email protected],rsa-sha2-512,rsa-sha2-256,ssh-rsa\r\ndebug2: ciphers ctos: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: ciphers stoc: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: MACs ctos: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: MACs stoc: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: compression ctos: [email protected],zlib,none\r\ndebug2: compression stoc: [email protected],zlib,none\r\ndebug2: languages ctos: \r\ndebug2: languages stoc: \r\ndebug2: first_kex_follows 0 \r\ndebug2: reserved 0 \r\ndebug2: peer server KEXINIT proposal\r\ndebug2: KEX algorithms: 
curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group14-sha256\r\ndebug2: host key algorithms: rsa-sha2-512,rsa-sha2-256,ssh-rsa,ecdsa-sha2-nistp256,ssh-ed25519\r\ndebug2: ciphers ctos: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: ciphers stoc: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: MACs ctos: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: MACs stoc: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: compression ctos: none,[email protected]\r\ndebug2: compression stoc: none,[email protected]\r\ndebug2: languages ctos: \r\ndebug2: languages stoc: \r\ndebug2: first_kex_follows 0 \r\ndebug2: reserved 0 \r\ndebug1: kex: algorithm: curve25519-sha256\r\ndebug1: kex: host key algorithm: ecdsa-sha2-nistp256\r\ndebug1: kex: server->client cipher: [email protected] MAC: <implicit> compression: [email protected]\r\ndebug1: kex: client->server cipher: [email protected] MAC: <implicit> compression: [email protected]\r\ndebug3: send packet: type 30\r\ndebug1: expecting SSH2_MSG_KEX_ECDH_REPLY\r\ndebug3: receive packet: type 31\r\ndebug1: Server host key: ecdsa-sha2-nistp256 SHA256:LylIOfTqWm+geE35hisc6xKHr/ZHTHTs/UQn80pYXOo\r\nWarning: Permanently added \'10.1.1.201\' (ECDSA) to the list of known hosts.\r\ndebug3: send packet: type 21\r\ndebug2: set_newkeys: mode 1\r\ndebug1: rekey out after 134217728 blocks\r\ndebug1: SSH2_MSG_NEWKEYS sent\r\ndebug1: expecting SSH2_MSG_NEWKEYS\r\ndebug3: receive packet: type 21\r\ndebug1: SSH2_MSG_NEWKEYS received\r\ndebug2: set_newkeys: mode 0\r\ndebug1: rekey in after 134217728 blocks\r\ndebug1: Will attempt key: /root/.ssh/id_rsa \r\ndebug1: Will attempt key: /root/.ssh/id_dsa \r\ndebug1: Will attempt key: /root/.ssh/id_ecdsa \r\ndebug1: Will attempt key: /root/.ssh/id_ecdsa_sk \r\ndebug1: Will attempt key: /root/.ssh/id_ed25519 \r\ndebug1: Will attempt key: /root/.ssh/id_ed25519_sk \r\ndebug1: Will attempt key: /root/.ssh/id_xmss \r\ndebug2: pubkey_prepare: done\r\ndebug3: send packet: type 5\r\ndebug3: receive packet: type 7\r\ndebug1: SSH2_MSG_EXT_INFO received\r\ndebug1: kex_input_ext_info: server-sig-algs=<ssh-ed25519,[email protected],ssh-rsa,rsa-sha2-256,rsa-sha2-512,ssh-dss,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,[email protected]>\r\ndebug3: receive packet: type 6\r\ndebug2: service_accept: ssh-userauth\r\ndebug1: SSH2_MSG_SERVICE_ACCEPT received\r\ndebug3: send packet: type 50\r\ndebug3: receive packet: type 51\r\ndebug1: Authentications that can continue: publickey,password\r\ndebug3: start over, passed a different list publickey,password\r\ndebug3: preferred gssapi-with-mic,publickey,keyboard-interactive,password\r\ndebug3: authmethod_lookup publickey\r\ndebug3: remaining preferred: keyboard-interactive,password\r\ndebug3: authmethod_is_enabled publickey\r\ndebug1: Next authentication method: publickey\r\ndebug1: Trying private key: /root/.ssh/id_rsa\r\ndebug3: no such identity: /root/.ssh/id_rsa: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_dsa\r\ndebug3: no such identity: /root/.ssh/id_dsa: No 
such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ecdsa\r\ndebug3: no such identity: /root/.ssh/id_ecdsa: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ecdsa_sk\r\ndebug3: no such identity: /root/.ssh/id_ecdsa_sk: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ed25519\r\ndebug3: no such identity: /root/.ssh/id_ed25519: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ed25519_sk\r\ndebug3: no such identity: /root/.ssh/id_ed25519_sk: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_xmss\r\ndebug3: no such identity: /root/.ssh/id_xmss: No such file or directory\r\ndebug2: we did not send a packet, disable method\r\ndebug3: authmethod_lookup password\r\ndebug3: remaining preferred: ,password\r\ndebug3: authmethod_is_enabled password\r\ndebug1: Next authentication method: password\r\ndebug3: send packet: type 50\r\ndebug2: we sent a password packet, wait for reply\r\ndebug3: receive packet: type 52\r\ndebug1: Enabling compression at level 6.\r\ndebug1: Authentication succeeded (password).\r\nAuthenticated to 10.1.1.201 ([10.1.1.201]:22).\r\ndebug1: setting up multiplex master socket\r\ndebug3: muxserver_listen: temporary control path /root/.ansible/cp/48372ad7a3.XW29Aj350ciuha3l\r\ndebug2: fd 4 setting O_NONBLOCK\r\ndebug3: fd 4 is O_NONBLOCK\r\ndebug3: fd 4 is O_NONBLOCK\r\ndebug1: channel 0: new [/root/.ansible/cp/48372ad7a3]\r\ndebug3: muxserver_listen: mux listener channel 0 fd 4\r\ndebug2: fd 3 setting TCP_NODELAY\r\ndebug3: ssh_packet_set_tos: set IP_TOS 0x08\r\ndebug1: control_persist_detach: backgrounding master process\r\ndebug2: control_persist_detach: background process is 16\r\ndebug2: fd 4 setting O_NONBLOCK\r\ndebug1: forking to background\r\ndebug1: Entering interactive session.\r\ndebug1: pledge: id\r\ndebug2: set_control_persist_exit_time: schedule exit in 60 seconds\r\ndebug1: multiplexing control connection\r\ndebug2: fd 5 setting O_NONBLOCK\r\ndebug3: fd 5 is O_NONBLOCK\r\ndebug1: channel 1: new [mux-control]\r\ndebug3: channel_post_mux_listener: new mux channel 1 fd 5\r\ndebug3: mux_master_read_cb: channel 1: hello sent\r\ndebug2: set_control_persist_exit_time: cancel scheduled exit\r\ndebug3: mux_master_read_cb: channel 1 packet type 0x00000001 len 4\r\ndebug2: mux_master_process_hello: channel 1 slave version 4\r\ndebug2: mux_client_hello_exchange: master version 4\r\ndebug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r\ndebug3: mux_client_request_session: entering\r\ndebug3: mux_client_request_alive: entering\r\ndebug3: mux_master_read_cb: channel 1 packet type 0x10000004 len 4\r\ndebug2: mux_master_process_alive_check: channel 1: alive check\r\ndebug3: mux_client_request_alive: done pid = 18\r\ndebug3: mux_client_request_session: session request sent\r\ndebug3: mux_master_read_cb: channel 1 packet type 0x10000002 len 256\r\ndebug3: mux_master_process_new_session: got fds stdin 6, stdout 7, stderr 8\r\ndebug2: fd 6 setting O_NONBLOCK\r\ndebug2: fd 7 setting O_NONBLOCK\r\ndebug2: fd 8 setting O_NONBLOCK\r\ndebug1: channel 2: new [client-session]\r\ndebug2: mux_master_process_new_session: channel_new: 2 linked to control channel 1\r\ndebug2: channel 2: send open\r\ndebug3: send packet: type 90\r\ndebug3: receive packet: type 80\r\ndebug1: client_input_global_request: rtype [email protected] want_reply 0\r\ndebug3: receive packet: type 91\r\ndebug2: channel_input_open_confirmation: channel 2: callback start\r\ndebug2: client_session2_setup: id 
2\r\ndebug1: Sending environment.\r\ndebug1: Sending env LANG = C.UTF-8\r\ndebug2: channel 2: request env confirm 0\r\ndebug3: send packet: type 98\r\ndebug1: Sending command: /bin/sh -c \'sudo -H -S -p "[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:" -u root /bin/sh -c \'"\'"\'echo BECOME-SUCCESS-muszsaqzcjtetkenkeiiqhqnnsdqlvmx ; python3\'"\'"\' && sleep 0\'\r\ndebug2: channel 2: request exec confirm 1\r\ndebug3: send packet: type 98\r\ndebug3: mux_session_confirm: sending success reply\r\ndebug2: channel_input_open_confirmation: channel 2: callback done\r\ndebug2: channel 2: open confirm rwindow 0 rmax 32768\r\ndebug2: channel 2: rcvd adjust 2097152\r\ndebug3: receive packet: type 99\r\ndebug2: channel_input_status_confirm: type 99 id 2\r\ndebug2: exec request accepted on channel 2\r\ndebug2: channel 2: read<=0 rfd 6 len 0\r\ndebug2: channel 2: read failed\r\ndebug2: channel 2: chan_shutdown_read (i0 o0 sock -1 wfd 6 efd 8 [write])\r\ndebug2: channel 2: input open -> drain\r\ndebug2: channel 2: ibuf empty\r\ndebug2: channel 2: send eof\r\ndebug3: send packet: type 96\r\ndebug2: channel 2: input drain -> closed\r\ndebug2: channel 2: rcvd adjust 65536\r\ndebug2: channel 2: rcvd ext data 66\r\n[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8\r\ndebug2: channel 2: rcvd ext data 18\r\nSorry, try again.\ndebug2: channel 2: written 18 to efd 8\r\ndebug2: channel 2: rcvd ext data 66\r\n[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8\r\ndebug2: channel 2: rcvd ext data 18\r\nSorry, try again.\ndebug2: channel 2: written 18 to efd 8\r\ndebug2: channel 2: rcvd ext data 66\r\n[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8\r\ndebug2: channel 2: rcvd ext data 36\r\nsudo: 3 incorrect password attempts\ndebug2: channel 2: written 36 to efd 8\r\ndebug3: receive packet: type 98\r\ndebug1: client_input_channel_req: channel 2 rtype [email protected] reply 0\r\ndebug2: channel 2: rcvd eow\r\ndebug3: receive packet: type 96\r\ndebug2: channel 2: rcvd eof\r\ndebug2: channel 2: output open -> drain\r\ndebug2: channel 2: obuf empty\r\ndebug2: channel 2: chan_shutdown_write (i3 o1 sock -1 wfd 7 efd 8 [write])\r\ndebug2: channel 2: output drain -> closed\r\ndebug2: channel 2: send close\r\ndebug3: send packet: type 97\r\ndebug3: channel 2: will not send data after close\r\ndebug3: receive packet: type 98\r\ndebug1: client_input_channel_req: channel 2 rtype exit-status reply 0\r\ndebug3: mux_exit_message: channel 2: exit message, exitval 1\r\ndebug3: receive packet: type 97\r\ndebug2: channel 2: rcvd close\r\ndebug3: channel 2: will not send data after close\r\ndebug2: channel 2: is dead\r\ndebug2: channel 2: gc: notify user\r\ndebug3: mux_master_session_cleanup_cb: entering for channel 2\r\ndebug2: channel 1: rcvd close\r\ndebug2: channel 1: output open -> drain\r\ndebug2: channel 1: chan_shutdown_read (i0 o1 sock 5 wfd 5 efd -1 [closed])\r\ndebug2: channel 1: input open -> closed\r\ndebug2: channel 2: gc: user detached\r\ndebug2: channel 2: is dead\r\ndebug2: channel 2: garbage collecting\r\ndebug1: channel 2: free: client-session, nchannels 3\r\ndebug3: channel 2: status: The following connections are open:\r\n #1 mux-control (t16 nr0 i3/0 o1/16 e[closed]/0 fd 5/5/-1 sock 5 cc -1)\r\n #2 client-session (t4 r0 i3/0 o3/0 e[write]/0 fd -1/-1/8 sock -1 cc -1)\r\n\r\ndebug2: channel 1: obuf empty\r\ndebug2: channel 1: 
chan_shutdown_write (i3 o1 sock 5 wfd 5 efd -1 [closed])\r\ndebug2: channel 1: output drain -> closed\r\ndebug2: channel 1: is dead (local)\r\ndebug2: channel 1: gc: notify user\r\ndebug3: mux_master_control_cleanup_cb: entering for channel 1\r\ndebug2: channel 1: gc: user detached\r\ndebug2: channel 1: is dead (local)\r\ndebug2: channel 1: garbage collecting\r\ndebug1: channel 1: free: mux-control, nchannels 2\r\ndebug3: channel 1: status: The following connections are open:\r\n #1 mux-control (t16 nr0 i3/0 o3/0 e[closed]/0 fd 5/5/-1 sock 5 cc -1)\r\n\r\ndebug2: set_control_persist_exit_time: schedule exit in 60 seconds\r\ndebug3: mux_client_read_packet: read header failed: Broken pipe\r\ndebug2: Received exit status from master 1\r\n')
<10.1.1.201> Failed to connect to the host via ssh: OpenSSH_8.2p1 Ubuntu-4ubuntu0.3, OpenSSL 1.1.1f 31 Mar 2020
debug1: Reading configuration data /etc/ssh/ssh_config
debug1: /etc/ssh/ssh_config line 19: include /etc/ssh/ssh_config.d/*.conf matched no files
debug1: /etc/ssh/ssh_config line 21: Applying options for *
debug2: resolve_canonicalize: hostname 10.1.1.201 is address
debug1: auto-mux: Trying existing master
debug1: Control socket "/root/.ansible/cp/48372ad7a3" does not exist
debug2: ssh_connect_direct
debug1: Connecting to 10.1.1.201 [10.1.1.201] port 22.
debug2: fd 3 setting O_NONBLOCK
debug1: fd 3 clearing O_NONBLOCK
debug1: Connection established.
debug3: timeout: 10000 ms remain after connect
debug1: SELinux support disabled
debug1: identity file /root/.ssh/id_rsa type -1
debug1: identity file /root/.ssh/id_rsa-cert type -1
debug1: identity file /root/.ssh/id_dsa type -1
debug1: identity file /root/.ssh/id_dsa-cert type -1
debug1: identity file /root/.ssh/id_ecdsa type -1
debug1: identity file /root/.ssh/id_ecdsa-cert type -1
debug1: identity file /root/.ssh/id_ecdsa_sk type -1
debug1: identity file /root/.ssh/id_ecdsa_sk-cert type -1
debug1: identity file /root/.ssh/id_ed25519 type -1
debug1: identity file /root/.ssh/id_ed25519-cert type -1
debug1: identity file /root/.ssh/id_ed25519_sk type -1
debug1: identity file /root/.ssh/id_ed25519_sk-cert type -1
debug1: identity file /root/.ssh/id_xmss type -1
debug1: identity file /root/.ssh/id_xmss-cert type -1
debug1: Local version string SSH-2.0-OpenSSH_8.2p1 Ubuntu-4ubuntu0.3
debug1: Remote protocol version 2.0, remote software version OpenSSH_8.2p1 Ubuntu-4ubuntu0.3
debug1: match: OpenSSH_8.2p1 Ubuntu-4ubuntu0.3 pat OpenSSH* compat 0x04000000
debug2: fd 3 setting O_NONBLOCK
debug1: Authenticating to 10.1.1.201:22 as 'manager'
debug3: send packet: type 20
debug1: SSH2_MSG_KEXINIT sent
debug3: receive packet: type 20
debug1: SSH2_MSG_KEXINIT received
debug2: local client KEXINIT proposal
debug2: KEX algorithms: curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group14-sha256,ext-info-c
debug2: host key algorithms: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,[email protected],ssh-ed25519,[email protected],rsa-sha2-512,rsa-sha2-256,ssh-rsa
debug2: ciphers ctos: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]
debug2: ciphers stoc: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]
debug2: MACs ctos: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1
debug2: MACs stoc: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1
debug2: compression ctos: [email protected],zlib,none
debug2: compression stoc: [email protected],zlib,none
debug2: languages ctos:
debug2: languages stoc:
debug2: first_kex_follows 0
debug2: reserved 0
debug2: peer server KEXINIT proposal
debug2: KEX algorithms: curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group14-sha256
debug2: host key algorithms: rsa-sha2-512,rsa-sha2-256,ssh-rsa,ecdsa-sha2-nistp256,ssh-ed25519
debug2: ciphers ctos: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]
debug2: ciphers stoc: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]
debug2: MACs ctos: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1
debug2: MACs stoc: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1
debug2: compression ctos: none,[email protected]
debug2: compression stoc: none,[email protected]
debug2: languages ctos:
debug2: languages stoc:
debug2: first_kex_follows 0
debug2: reserved 0
debug1: kex: algorithm: curve25519-sha256
debug1: kex: host key algorithm: ecdsa-sha2-nistp256
debug1: kex: server->client cipher: [email protected] MAC: <implicit> compression: [email protected]
debug1: kex: client->server cipher: [email protected] MAC: <implicit> compression: [email protected]
debug3: send packet: type 30
debug1: expecting SSH2_MSG_KEX_ECDH_REPLY
debug3: receive packet: type 31
debug1: Server host key: ecdsa-sha2-nistp256 SHA256:LylIOfTqWm+geE35hisc6xKHr/ZHTHTs/UQn80pYXOo
Warning: Permanently added '10.1.1.201' (ECDSA) to the list of known hosts.
debug3: send packet: type 21
debug2: set_newkeys: mode 1
debug1: rekey out after 134217728 blocks
debug1: SSH2_MSG_NEWKEYS sent
debug1: expecting SSH2_MSG_NEWKEYS
debug3: receive packet: type 21
debug1: SSH2_MSG_NEWKEYS received
debug2: set_newkeys: mode 0
debug1: rekey in after 134217728 blocks
debug1: Will attempt key: /root/.ssh/id_rsa
debug1: Will attempt key: /root/.ssh/id_dsa
debug1: Will attempt key: /root/.ssh/id_ecdsa
debug1: Will attempt key: /root/.ssh/id_ecdsa_sk
debug1: Will attempt key: /root/.ssh/id_ed25519
debug1: Will attempt key: /root/.ssh/id_ed25519_sk
debug1: Will attempt key: /root/.ssh/id_xmss
debug2: pubkey_prepare: done
debug3: send packet: type 5
debug3: receive packet: type 7
debug1: SSH2_MSG_EXT_INFO received
debug1: kex_input_ext_info: server-sig-algs=<ssh-ed25519,[email protected],ssh-rsa,rsa-sha2-256,rsa-sha2-512,ssh-dss,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,[email protected]>
debug3: receive packet: type 6
debug2: service_accept: ssh-userauth
debug1: SSH2_MSG_SERVICE_ACCEPT received
debug3: send packet: type 50
debug3: receive packet: type 51
debug1: Authentications that can continue: publickey,password
debug3: start over, passed a different list publickey,password
debug3: preferred gssapi-with-mic,publickey,keyboard-interactive,password
debug3: authmethod_lookup publickey
debug3: remaining preferred: keyboard-interactive,password
debug3: authmethod_is_enabled publickey
debug1: Next authentication method: publickey
debug1: Trying private key: /root/.ssh/id_rsa
debug3: no such identity: /root/.ssh/id_rsa: No such file or directory
debug1: Trying private key: /root/.ssh/id_dsa
debug3: no such identity: /root/.ssh/id_dsa: No such file or directory
debug1: Trying private key: /root/.ssh/id_ecdsa
debug3: no such identity: /root/.ssh/id_ecdsa: No such file or directory
debug1: Trying private key: /root/.ssh/id_ecdsa_sk
debug3: no such identity: /root/.ssh/id_ecdsa_sk: No such file or directory
debug1: Trying private key: /root/.ssh/id_ed25519
debug3: no such identity: /root/.ssh/id_ed25519: No such file or directory
debug1: Trying private key: /root/.ssh/id_ed25519_sk
debug3: no such identity: /root/.ssh/id_ed25519_sk: No such file or directory
debug1: Trying private key: /root/.ssh/id_xmss
debug3: no such identity: /root/.ssh/id_xmss: No such file or directory
debug2: we did not send a packet, disable method
debug3: authmethod_lookup password
debug3: remaining preferred: ,password
debug3: authmethod_is_enabled password
debug1: Next authentication method: password
debug3: send packet: type 50
debug2: we sent a password packet, wait for reply
debug3: receive packet: type 52
debug1: Enabling compression at level 6.
debug1: Authentication succeeded (password).
Authenticated to 10.1.1.201 ([10.1.1.201]:22).
debug1: setting up multiplex master socket
debug3: muxserver_listen: temporary control path /root/.ansible/cp/48372ad7a3.XW29Aj350ciuha3l
debug2: fd 4 setting O_NONBLOCK
debug3: fd 4 is O_NONBLOCK
debug3: fd 4 is O_NONBLOCK
debug1: channel 0: new [/root/.ansible/cp/48372ad7a3]
debug3: muxserver_listen: mux listener channel 0 fd 4
debug2: fd 3 setting TCP_NODELAY
debug3: ssh_packet_set_tos: set IP_TOS 0x08
debug1: control_persist_detach: backgrounding master process
debug2: control_persist_detach: background process is 16
debug2: fd 4 setting O_NONBLOCK
debug1: forking to background
debug1: Entering interactive session.
debug1: pledge: id
debug2: set_control_persist_exit_time: schedule exit in 60 seconds
debug1: multiplexing control connection
debug2: fd 5 setting O_NONBLOCK
debug3: fd 5 is O_NONBLOCK
debug1: channel 1: new [mux-control]
debug3: channel_post_mux_listener: new mux channel 1 fd 5
debug3: mux_master_read_cb: channel 1: hello sent
debug2: set_control_persist_exit_time: cancel scheduled exit
debug3: mux_master_read_cb: channel 1 packet type 0x00000001 len 4
debug2: mux_master_process_hello: channel 1 slave version 4
debug2: mux_client_hello_exchange: master version 4
debug3: mux_client_forwards: request forwardings: 0 local, 0 remote
debug3: mux_client_request_session: entering
debug3: mux_client_request_alive: entering
debug3: mux_master_read_cb: channel 1 packet type 0x10000004 len 4
debug2: mux_master_process_alive_check: channel 1: alive check
debug3: mux_client_request_alive: done pid = 18
debug3: mux_client_request_session: session request sent
debug3: mux_master_read_cb: channel 1 packet type 0x10000002 len 256
debug3: mux_master_process_new_session: got fds stdin 6, stdout 7, stderr 8
debug2: fd 6 setting O_NONBLOCK
debug2: fd 7 setting O_NONBLOCK
debug2: fd 8 setting O_NONBLOCK
debug1: channel 2: new [client-session]
debug2: mux_master_process_new_session: channel_new: 2 linked to control channel 1
debug2: channel 2: send open
debug3: send packet: type 90
debug3: receive packet: type 80
debug1: client_input_global_request: rtype [email protected] want_reply 0
debug3: receive packet: type 91
debug2: channel_input_open_confirmation: channel 2: callback start
debug2: client_session2_setup: id 2
debug1: Sending environment.
debug1: Sending env LANG = C.UTF-8
debug2: channel 2: request env confirm 0
debug3: send packet: type 98
debug1: Sending command: /bin/sh -c 'sudo -H -S -p "[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:" -u root /bin/sh -c '"'"'echo BECOME-SUCCESS-muszsaqzcjtetkenkeiiqhqnnsdqlvmx ; python3'"'"' && sleep 0'
debug2: channel 2: request exec confirm 1
debug3: send packet: type 98
debug3: mux_session_confirm: sending success reply
debug2: channel_input_open_confirmation: channel 2: callback done
debug2: channel 2: open confirm rwindow 0 rmax 32768
debug2: channel 2: rcvd adjust 2097152
debug3: receive packet: type 99
debug2: channel_input_status_confirm: type 99 id 2
debug2: exec request accepted on channel 2
debug2: channel 2: read<=0 rfd 6 len 0
debug2: channel 2: read failed
debug2: channel 2: chan_shutdown_read (i0 o0 sock -1 wfd 6 efd 8 [write])
debug2: channel 2: input open -> drain
debug2: channel 2: ibuf empty
debug2: channel 2: send eof
debug3: send packet: type 96
debug2: channel 2: input drain -> closed
debug2: channel 2: rcvd adjust 65536
debug2: channel 2: rcvd ext data 66
[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8
debug2: channel 2: rcvd ext data 18
Sorry, try again.
debug2: channel 2: written 18 to efd 8
debug2: channel 2: rcvd ext data 66
[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8
debug2: channel 2: rcvd ext data 18
Sorry, try again.
debug2: channel 2: written 18 to efd 8
debug2: channel 2: rcvd ext data 66
[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8
debug2: channel 2: rcvd ext data 36
sudo: 3 incorrect password attempts
debug2: channel 2: written 36 to efd 8
debug3: receive packet: type 98
debug1: client_input_channel_req: channel 2 rtype [email protected] reply 0
debug2: channel 2: rcvd eow
debug3: receive packet: type 96
debug2: channel 2: rcvd eof
debug2: channel 2: output open -> drain
debug2: channel 2: obuf empty
debug2: channel 2: chan_shutdown_write (i3 o1 sock -1 wfd 7 efd 8 [write])
debug2: channel 2: output drain -> closed
debug2: channel 2: send close
debug3: send packet: type 97
debug3: channel 2: will not send data after close
debug3: receive packet: type 98
debug1: client_input_channel_req: channel 2 rtype exit-status reply 0
debug3: mux_exit_message: channel 2: exit message, exitval 1
debug3: receive packet: type 97
debug2: channel 2: rcvd close
debug3: channel 2: will not send data after close
debug2: channel 2: is dead
debug2: channel 2: gc: notify user
debug3: mux_master_session_cleanup_cb: entering for channel 2
debug2: channel 1: rcvd close
debug2: channel 1: output open -> drain
debug2: channel 1: chan_shutdown_read (i0 o1 sock 5 wfd 5 efd -1 [closed])
debug2: channel 1: input open -> closed
debug2: channel 2: gc: user detached
debug2: channel 2: is dead
debug2: channel 2: garbage collecting
debug1: channel 2: free: client-session, nchannels 3
debug3: channel 2: status: The following connections are open:
#1 mux-control (t16 nr0 i3/0 o1/16 e[closed]/0 fd 5/5/-1 sock 5 cc -1)
#2 client-session (t4 r0 i3/0 o3/0 e[write]/0 fd -1/-1/8 sock -1 cc -1)
debug2: channel 1: obuf empty
debug2: channel 1: chan_shutdown_write (i3 o1 sock 5 wfd 5 efd -1 [closed])
debug2: channel 1: output drain -> closed
debug2: channel 1: is dead (local)
debug2: channel 1: gc: notify user
debug3: mux_master_control_cleanup_cb: entering for channel 1
debug2: channel 1: gc: user detached
debug2: channel 1: is dead (local)
debug2: channel 1: garbage collecting
debug1: channel 1: free: mux-control, nchannels 2
debug3: channel 1: status: The following connections are open:
#1 mux-control (t16 nr0 i3/0 o3/0 e[closed]/0 fd 5/5/-1 sock 5 cc -1)
debug2: set_control_persist_exit_time: schedule exit in 60 seconds
debug3: mux_client_read_packet: read header failed: Broken pipe
debug2: Received exit status from master 1
fatal: [test1]: FAILED! => {
"ansible_facts": {},
"changed": false,
"failed_modules": {
"ansible.legacy.setup": {
"failed": true,
"module_stderr": "OpenSSH_8.2p1 Ubuntu-4ubuntu0.3, OpenSSL 1.1.1f 31 Mar 2020\r\ndebug1: Reading configuration data /etc/ssh/ssh_config\r\ndebug1: /etc/ssh/ssh_config line 19: include /etc/ssh/ssh_config.d/*.conf matched no files\r\ndebug1: /etc/ssh/ssh_config line 21: Applying options for *\r\ndebug2: resolve_canonicalize: hostname 10.1.1.201 is address\r\ndebug1: auto-mux: Trying existing master\r\ndebug1: Control socket \"/root/.ansible/cp/48372ad7a3\" does not exist\r\ndebug2: ssh_connect_direct\r\ndebug1: Connecting to 10.1.1.201 [10.1.1.201] port 22.\r\ndebug2: fd 3 setting O_NONBLOCK\r\ndebug1: fd 3 clearing O_NONBLOCK\r\ndebug1: Connection established.\r\ndebug3: timeout: 10000 ms remain after connect\r\ndebug1: SELinux support disabled\r\ndebug1: identity file /root/.ssh/id_rsa type -1\r\ndebug1: identity file /root/.ssh/id_rsa-cert type -1\r\ndebug1: identity file /root/.ssh/id_dsa type -1\r\ndebug1: identity file /root/.ssh/id_dsa-cert type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa-cert type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa_sk type -1\r\ndebug1: identity file /root/.ssh/id_ecdsa_sk-cert type -1\r\ndebug1: identity file /root/.ssh/id_ed25519 type -1\r\ndebug1: identity file /root/.ssh/id_ed25519-cert type -1\r\ndebug1: identity file /root/.ssh/id_ed25519_sk type -1\r\ndebug1: identity file /root/.ssh/id_ed25519_sk-cert type -1\r\ndebug1: identity file /root/.ssh/id_xmss type -1\r\ndebug1: identity file /root/.ssh/id_xmss-cert type -1\r\ndebug1: Local version string SSH-2.0-OpenSSH_8.2p1 Ubuntu-4ubuntu0.3\r\ndebug1: Remote protocol version 2.0, remote software version OpenSSH_8.2p1 Ubuntu-4ubuntu0.3\r\ndebug1: match: OpenSSH_8.2p1 Ubuntu-4ubuntu0.3 pat OpenSSH* compat 0x04000000\r\ndebug2: fd 3 setting O_NONBLOCK\r\ndebug1: Authenticating to 10.1.1.201:22 as 'manager'\r\ndebug3: send packet: type 20\r\ndebug1: SSH2_MSG_KEXINIT sent\r\ndebug3: receive packet: type 20\r\ndebug1: SSH2_MSG_KEXINIT received\r\ndebug2: local client KEXINIT proposal\r\ndebug2: KEX algorithms: curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group14-sha256,ext-info-c\r\ndebug2: host key algorithms: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,[email protected],ssh-ed25519,[email protected],rsa-sha2-512,rsa-sha2-256,ssh-rsa\r\ndebug2: ciphers ctos: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: ciphers stoc: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: MACs ctos: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: MACs stoc: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: compression ctos: [email protected],zlib,none\r\ndebug2: compression stoc: [email protected],zlib,none\r\ndebug2: languages ctos: \r\ndebug2: languages stoc: \r\ndebug2: first_kex_follows 0 \r\ndebug2: reserved 0 \r\ndebug2: peer server KEXINIT proposal\r\ndebug2: KEX algorithms: 
curve25519-sha256,[email protected],ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group14-sha256\r\ndebug2: host key algorithms: rsa-sha2-512,rsa-sha2-256,ssh-rsa,ecdsa-sha2-nistp256,ssh-ed25519\r\ndebug2: ciphers ctos: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: ciphers stoc: [email protected],aes128-ctr,aes192-ctr,aes256-ctr,[email protected],[email protected]\r\ndebug2: MACs ctos: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: MACs stoc: [email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],hmac-sha2-256,hmac-sha2-512,hmac-sha1\r\ndebug2: compression ctos: none,[email protected]\r\ndebug2: compression stoc: none,[email protected]\r\ndebug2: languages ctos: \r\ndebug2: languages stoc: \r\ndebug2: first_kex_follows 0 \r\ndebug2: reserved 0 \r\ndebug1: kex: algorithm: curve25519-sha256\r\ndebug1: kex: host key algorithm: ecdsa-sha2-nistp256\r\ndebug1: kex: server->client cipher: [email protected] MAC: <implicit> compression: [email protected]\r\ndebug1: kex: client->server cipher: [email protected] MAC: <implicit> compression: [email protected]\r\ndebug3: send packet: type 30\r\ndebug1: expecting SSH2_MSG_KEX_ECDH_REPLY\r\ndebug3: receive packet: type 31\r\ndebug1: Server host key: ecdsa-sha2-nistp256 SHA256:LylIOfTqWm+geE35hisc6xKHr/ZHTHTs/UQn80pYXOo\r\nWarning: Permanently added '10.1.1.201' (ECDSA) to the list of known hosts.\r\ndebug3: send packet: type 21\r\ndebug2: set_newkeys: mode 1\r\ndebug1: rekey out after 134217728 blocks\r\ndebug1: SSH2_MSG_NEWKEYS sent\r\ndebug1: expecting SSH2_MSG_NEWKEYS\r\ndebug3: receive packet: type 21\r\ndebug1: SSH2_MSG_NEWKEYS received\r\ndebug2: set_newkeys: mode 0\r\ndebug1: rekey in after 134217728 blocks\r\ndebug1: Will attempt key: /root/.ssh/id_rsa \r\ndebug1: Will attempt key: /root/.ssh/id_dsa \r\ndebug1: Will attempt key: /root/.ssh/id_ecdsa \r\ndebug1: Will attempt key: /root/.ssh/id_ecdsa_sk \r\ndebug1: Will attempt key: /root/.ssh/id_ed25519 \r\ndebug1: Will attempt key: /root/.ssh/id_ed25519_sk \r\ndebug1: Will attempt key: /root/.ssh/id_xmss \r\ndebug2: pubkey_prepare: done\r\ndebug3: send packet: type 5\r\ndebug3: receive packet: type 7\r\ndebug1: SSH2_MSG_EXT_INFO received\r\ndebug1: kex_input_ext_info: server-sig-algs=<ssh-ed25519,[email protected],ssh-rsa,rsa-sha2-256,rsa-sha2-512,ssh-dss,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,[email protected]>\r\ndebug3: receive packet: type 6\r\ndebug2: service_accept: ssh-userauth\r\ndebug1: SSH2_MSG_SERVICE_ACCEPT received\r\ndebug3: send packet: type 50\r\ndebug3: receive packet: type 51\r\ndebug1: Authentications that can continue: publickey,password\r\ndebug3: start over, passed a different list publickey,password\r\ndebug3: preferred gssapi-with-mic,publickey,keyboard-interactive,password\r\ndebug3: authmethod_lookup publickey\r\ndebug3: remaining preferred: keyboard-interactive,password\r\ndebug3: authmethod_is_enabled publickey\r\ndebug1: Next authentication method: publickey\r\ndebug1: Trying private key: /root/.ssh/id_rsa\r\ndebug3: no such identity: /root/.ssh/id_rsa: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_dsa\r\ndebug3: no such identity: /root/.ssh/id_dsa: No such 
file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ecdsa\r\ndebug3: no such identity: /root/.ssh/id_ecdsa: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ecdsa_sk\r\ndebug3: no such identity: /root/.ssh/id_ecdsa_sk: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ed25519\r\ndebug3: no such identity: /root/.ssh/id_ed25519: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_ed25519_sk\r\ndebug3: no such identity: /root/.ssh/id_ed25519_sk: No such file or directory\r\ndebug1: Trying private key: /root/.ssh/id_xmss\r\ndebug3: no such identity: /root/.ssh/id_xmss: No such file or directory\r\ndebug2: we did not send a packet, disable method\r\ndebug3: authmethod_lookup password\r\ndebug3: remaining preferred: ,password\r\ndebug3: authmethod_is_enabled password\r\ndebug1: Next authentication method: password\r\ndebug3: send packet: type 50\r\ndebug2: we sent a password packet, wait for reply\r\ndebug3: receive packet: type 52\r\ndebug1: Enabling compression at level 6.\r\ndebug1: Authentication succeeded (password).\r\nAuthenticated to 10.1.1.201 ([10.1.1.201]:22).\r\ndebug1: setting up multiplex master socket\r\ndebug3: muxserver_listen: temporary control path /root/.ansible/cp/48372ad7a3.XW29Aj350ciuha3l\r\ndebug2: fd 4 setting O_NONBLOCK\r\ndebug3: fd 4 is O_NONBLOCK\r\ndebug3: fd 4 is O_NONBLOCK\r\ndebug1: channel 0: new [/root/.ansible/cp/48372ad7a3]\r\ndebug3: muxserver_listen: mux listener channel 0 fd 4\r\ndebug2: fd 3 setting TCP_NODELAY\r\ndebug3: ssh_packet_set_tos: set IP_TOS 0x08\r\ndebug1: control_persist_detach: backgrounding master process\r\ndebug2: control_persist_detach: background process is 16\r\ndebug2: fd 4 setting O_NONBLOCK\r\ndebug1: forking to background\r\ndebug1: Entering interactive session.\r\ndebug1: pledge: id\r\ndebug2: set_control_persist_exit_time: schedule exit in 60 seconds\r\ndebug1: multiplexing control connection\r\ndebug2: fd 5 setting O_NONBLOCK\r\ndebug3: fd 5 is O_NONBLOCK\r\ndebug1: channel 1: new [mux-control]\r\ndebug3: channel_post_mux_listener: new mux channel 1 fd 5\r\ndebug3: mux_master_read_cb: channel 1: hello sent\r\ndebug2: set_control_persist_exit_time: cancel scheduled exit\r\ndebug3: mux_master_read_cb: channel 1 packet type 0x00000001 len 4\r\ndebug2: mux_master_process_hello: channel 1 slave version 4\r\ndebug2: mux_client_hello_exchange: master version 4\r\ndebug3: mux_client_forwards: request forwardings: 0 local, 0 remote\r\ndebug3: mux_client_request_session: entering\r\ndebug3: mux_client_request_alive: entering\r\ndebug3: mux_master_read_cb: channel 1 packet type 0x10000004 len 4\r\ndebug2: mux_master_process_alive_check: channel 1: alive check\r\ndebug3: mux_client_request_alive: done pid = 18\r\ndebug3: mux_client_request_session: session request sent\r\ndebug3: mux_master_read_cb: channel 1 packet type 0x10000002 len 256\r\ndebug3: mux_master_process_new_session: got fds stdin 6, stdout 7, stderr 8\r\ndebug2: fd 6 setting O_NONBLOCK\r\ndebug2: fd 7 setting O_NONBLOCK\r\ndebug2: fd 8 setting O_NONBLOCK\r\ndebug1: channel 2: new [client-session]\r\ndebug2: mux_master_process_new_session: channel_new: 2 linked to control channel 1\r\ndebug2: channel 2: send open\r\ndebug3: send packet: type 90\r\ndebug3: receive packet: type 80\r\ndebug1: client_input_global_request: rtype [email protected] want_reply 0\r\ndebug3: receive packet: type 91\r\ndebug2: channel_input_open_confirmation: channel 2: callback start\r\ndebug2: client_session2_setup: id 
2\r\ndebug1: Sending environment.\r\ndebug1: Sending env LANG = C.UTF-8\r\ndebug2: channel 2: request env confirm 0\r\ndebug3: send packet: type 98\r\ndebug1: Sending command: /bin/sh -c 'sudo -H -S -p \"[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:\" -u root /bin/sh -c '\"'\"'echo BECOME-SUCCESS-muszsaqzcjtetkenkeiiqhqnnsdqlvmx ; python3'\"'\"' && sleep 0'\r\ndebug2: channel 2: request exec confirm 1\r\ndebug3: send packet: type 98\r\ndebug3: mux_session_confirm: sending success reply\r\ndebug2: channel_input_open_confirmation: channel 2: callback done\r\ndebug2: channel 2: open confirm rwindow 0 rmax 32768\r\ndebug2: channel 2: rcvd adjust 2097152\r\ndebug3: receive packet: type 99\r\ndebug2: channel_input_status_confirm: type 99 id 2\r\ndebug2: exec request accepted on channel 2\r\ndebug2: channel 2: read<=0 rfd 6 len 0\r\ndebug2: channel 2: read failed\r\ndebug2: channel 2: chan_shutdown_read (i0 o0 sock -1 wfd 6 efd 8 [write])\r\ndebug2: channel 2: input open -> drain\r\ndebug2: channel 2: ibuf empty\r\ndebug2: channel 2: send eof\r\ndebug3: send packet: type 96\r\ndebug2: channel 2: input drain -> closed\r\ndebug2: channel 2: rcvd adjust 65536\r\ndebug2: channel 2: rcvd ext data 66\r\n[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8\r\ndebug2: channel 2: rcvd ext data 18\r\nSorry, try again.\ndebug2: channel 2: written 18 to efd 8\r\ndebug2: channel 2: rcvd ext data 66\r\n[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8\r\ndebug2: channel 2: rcvd ext data 18\r\nSorry, try again.\ndebug2: channel 2: written 18 to efd 8\r\ndebug2: channel 2: rcvd ext data 66\r\n[sudo via ansible, key=muszsaqzcjtetkenkeiiqhqnnsdqlvmx] password:debug2: channel 2: written 66 to efd 8\r\ndebug2: channel 2: rcvd ext data 36\r\nsudo: 3 incorrect password attempts\ndebug2: channel 2: written 36 to efd 8\r\ndebug3: receive packet: type 98\r\ndebug1: client_input_channel_req: channel 2 rtype [email protected] reply 0\r\ndebug2: channel 2: rcvd eow\r\ndebug3: receive packet: type 96\r\ndebug2: channel 2: rcvd eof\r\ndebug2: channel 2: output open -> drain\r\ndebug2: channel 2: obuf empty\r\ndebug2: channel 2: chan_shutdown_write (i3 o1 sock -1 wfd 7 efd 8 [write])\r\ndebug2: channel 2: output drain -> closed\r\ndebug2: channel 2: send close\r\ndebug3: send packet: type 97\r\ndebug3: channel 2: will not send data after close\r\ndebug3: receive packet: type 98\r\ndebug1: client_input_channel_req: channel 2 rtype exit-status reply 0\r\ndebug3: mux_exit_message: channel 2: exit message, exitval 1\r\ndebug3: receive packet: type 97\r\ndebug2: channel 2: rcvd close\r\ndebug3: channel 2: will not send data after close\r\ndebug2: channel 2: is dead\r\ndebug2: channel 2: gc: notify user\r\ndebug3: mux_master_session_cleanup_cb: entering for channel 2\r\ndebug2: channel 1: rcvd close\r\ndebug2: channel 1: output open -> drain\r\ndebug2: channel 1: chan_shutdown_read (i0 o1 sock 5 wfd 5 efd -1 [closed])\r\ndebug2: channel 1: input open -> closed\r\ndebug2: channel 2: gc: user detached\r\ndebug2: channel 2: is dead\r\ndebug2: channel 2: garbage collecting\r\ndebug1: channel 2: free: client-session, nchannels 3\r\ndebug3: channel 2: status: The following connections are open:\r\n #1 mux-control (t16 nr0 i3/0 o1/16 e[closed]/0 fd 5/5/-1 sock 5 cc -1)\r\n #2 client-session (t4 r0 i3/0 o3/0 e[write]/0 fd -1/-1/8 sock -1 cc -1)\r\n\r\ndebug2: channel 1: obuf empty\r\ndebug2: channel 1: 
chan_shutdown_write (i3 o1 sock 5 wfd 5 efd -1 [closed])\r\ndebug2: channel 1: output drain -> closed\r\ndebug2: channel 1: is dead (local)\r\ndebug2: channel 1: gc: notify user\r\ndebug3: mux_master_control_cleanup_cb: entering for channel 1\r\ndebug2: channel 1: gc: user detached\r\ndebug2: channel 1: is dead (local)\r\ndebug2: channel 1: garbage collecting\r\ndebug1: channel 1: free: mux-control, nchannels 2\r\ndebug3: channel 1: status: The following connections are open:\r\n #1 mux-control (t16 nr0 i3/0 o3/0 e[closed]/0 fd 5/5/-1 sock 5 cc -1)\r\n\r\ndebug2: set_control_persist_exit_time: schedule exit in 60 seconds\r\ndebug3: mux_client_read_packet: read header failed: Broken pipe\r\ndebug2: Received exit status from master 1\r\n",
"module_stdout": "",
"msg": "MODULE FAILURE\nSee stdout/stderr for the exact error",
"rc": 1
}
},
"msg": "The following modules failed to execute: ansible.legacy.setup\n"
}
PLAY RECAP *********************************************************************
test1 : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
Thursday 30 September 2021 14:38:33 +0000 (0:00:17.037) 0:00:17.098 ****
===============================================================================
Gathering Facts -------------------------------------------------------- 17.04s
/workspace/test.yml:2 ---------------------------------------------------------
```
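Note the difference in the SSH invocation between the two runs: with `-vvvv`, Ansible passes `-vvv` through to the ssh client itself, and sudo then reports three failed password attempts even though the same become password succeeds at lower verbosity:
```console
<10.1.1.201> SSH: EXEC sshpass -d10 ssh -vvv -C -o ControlMaster=auto ...
...
sudo: 3 incorrect password attempts
```
In the `-vvv` run below, ssh is invoked without its own debug flags and the escalation succeeds.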
Output with `-vvv`
```console
ansible-playbook [core 2.11.5]
config file = /workspace/ansible.cfg
configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.8/dist-packages/ansible
ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/local/bin/ansible-playbook
python version = 3.8.10 (default, Jun 2 2021, 10:49:15) [GCC 9.4.0]
jinja version = 3.0.1
libyaml = True
Using /workspace/ansible.cfg as config file
host_list declined parsing /workspace/inventories/test/hosts.yml as it did not pass its verify_file() method
script declined parsing /workspace/inventories/test/hosts.yml as it did not pass its verify_file() method
Parsed /workspace/inventories/test/hosts.yml inventory source with yaml plugin
Skipping callback 'default', as we already have a stdout callback.
Skipping callback 'minimal', as we already have a stdout callback.
Skipping callback 'oneline', as we already have a stdout callback.
PLAYBOOK: test.yml *************************************************************
1 plays in test.yml
PLAY [gitlab_runners] **********************************************************
TASK [Gathering Facts] *********************************************************
task path: /workspace/test.yml:2
Thursday 30 September 2021 14:39:02 +0000 (0:00:00.044) 0:00:00.044 ****
Using module file /usr/local/lib/python3.8/dist-packages/ansible/modules/setup.py
Pipelining is enabled.
<10.1.1.201> ESTABLISH SSH CONNECTION FOR USER: manager
<10.1.1.201> SSH: EXEC sshpass -d10 ssh -C -o ControlMaster=auto -o ControlPersist=60s -o StrictHostKeyChecking=no -o 'User="manager"' -o ConnectTimeout=10 -o StrictHostKeyChecking=no -o ControlPath=/root/.ansible/cp/48372ad7a3 10.1.1.201 '/bin/sh -c '"'"'sudo -H -S -p "[sudo via ansible, key=emwqvsoxlkrjzgibgvumxuqwnsbstafh] password:" -u root /bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-emwqvsoxlkrjzgibgvumxuqwnsbstafh ; python3'"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
<10.1.1.201> (0, b'\n{"ansible_facts": {"ansible_system": "Linux", "ansible_kernel": "5.4.0-86-generic", "ansible_kernel_version": "#97-Ubuntu SMP Fri Sep 17 19:19:40 UTC 2021", "ansible_machine": "x86_64", "ansible_python_version": "3.8.10", "ansible_fqdn": "test1", "ansible_hostname": "test1", "ansible_nodename": "test1", "ansible_domain": "", "ansible_userspace_bits": "64", "ansible_architecture": "x86_64", "ansible_userspace_architecture": "x86_64", "ansible_machine_id": "c47dd6e16b184c00a0226434c8d1b693", "ansible_user_id": "root", "ansible_user_uid": 0, "ansible_user_gid": 0, "ansible_user_gecos": "root", "ansible_user_dir": "/root", "ansible_user_shell": "/bin/bash", "ansible_real_user_id": 0, "ansible_effective_user_id": 0, "ansible_real_group_id": 0, "ansible_effective_group_id": 0, "ansible_python": {"version": {"major": 3, "minor": 8, "micro": 10, "releaselevel": "final", "serial": 0}, "version_info": [3, 8, 10, "final", 0], "executable": "/usr/bin/python3", "has_sslcontext": true, "type": "cpython"}, "ansible_fibre_channel_wwn": [], "ansible_is_chroot": false, "ansible_dns": {"nameservers": ["127.0.0.53"], "options": {"edns0": true, "trust-ad": true}, "search": ["frauscher.intern"]}, "ansible_processor": ["0", "GenuineIntel", "Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.60GHz", "1", "GenuineIntel", "Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.60GHz"], "ansible_processor_count": 1, "ansible_processor_cores": 2, "ansible_processor_threads_per_core": 1, "ansible_processor_vcpus": 2, "ansible_processor_nproc": 2, "ansible_memtotal_mb": 1987, "ansible_memfree_mb": 1459, "ansible_swaptotal_mb": 0, "ansible_swapfree_mb": 0, "ansible_memory_mb": {"real": {"total": 1987, "used": 528, "free": 1459}, "nocache": {"free": 1790, "used": 197}, "swap": {"total": 0, "free": 0, "used": 0, "cached": 0}}, "ansible_bios_date": "12/01/2006", "ansible_bios_vendor": "innotek GmbH", "ansible_bios_version": "VirtualBox", "ansible_board_asset_tag": "NA", "ansible_board_name": "VirtualBox", "ansible_board_serial": "0", "ansible_board_vendor": "Oracle Corporation", "ansible_board_version": "1.2", "ansible_chassis_asset_tag": "NA", "ansible_chassis_serial": "NA", "ansible_chassis_vendor": "Oracle Corporation", "ansible_chassis_version": "NA", "ansible_form_factor": "Other", "ansible_product_name": "VirtualBox", "ansible_product_serial": "0", "ansible_product_uuid": "e0eea82f-53ae-3447-bc29-87fb43fa6de8", "ansible_product_version": "1.2", "ansible_system_vendor": "innotek GmbH", "ansible_devices": {"loop1": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "4096", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "137728", "sectorsize": "512", "size": "67.25 MB", "host": "", "holders": []}, "sdb": {"virtual": 1, "links": {"ids": [], "uuids": ["2021-09-23-22-20-32-00"], "labels": ["cidata"], "masters": []}, "vendor": "VBOX", "model": "HARDDISK", "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "0", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "20480", "sectorsize": "512", "size": "10.00 MB", "host": "SCSI storage controller: Broadcom / LSI 53c1030 PCI-X Fusion-MPT Dual Ultra320 SCSI", "holders": []}, "loop6": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", 
"support_discard": "0", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "0", "sectorsize": "512", "size": "0.00 Bytes", "host": "", "holders": []}, "loop4": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "0", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "0", "sectorsize": "512", "size": "0.00 Bytes", "host": "", "holders": []}, "loop2": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "4096", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "66168", "sectorsize": "512", "size": "32.31 MB", "host": "", "holders": []}, "loop0": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "4096", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "126504", "sectorsize": "512", "size": "61.77 MB", "host": "", "holders": []}, "loop7": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "0", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "0", "sectorsize": "512", "size": "0.00 Bytes", "host": "", "holders": []}, "sda": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": "VBOX", "model": "HARDDISK", "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "0", "partitions": {"sda1": {"links": {"ids": [], "uuids": ["94aefc52-4039-4b4a-8b2a-65124174d9b9"], "labels": ["cloudimg-rootfs"], "masters": []}, "start": "2048", "sectors": "83883999", "sectorsize": 512, "size": "40.00 GB", "uuid": "94aefc52-4039-4b4a-8b2a-65124174d9b9", "holders": []}}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "83886080", "sectorsize": "512", "size": "40.00 GB", "host": "SCSI storage controller: Broadcom / LSI 53c1030 PCI-X Fusion-MPT Dual Ultra320 SCSI", "holders": []}, "loop5": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "0", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "0", "sectorsize": "512", "size": "0.00 Bytes", "host": "", "holders": []}, "loop3": {"virtual": 1, "links": {"ids": [], "uuids": [], "labels": [], "masters": []}, "vendor": null, "model": null, "sas_address": null, "sas_device_handle": null, "removable": "0", "support_discard": "4096", "partitions": {}, "rotational": "1", "scheduler_mode": "mq-deadline", "sectors": "0", "sectorsize": "512", "size": "0.00 Bytes", "host": "", "holders": []}}, "ansible_device_links": {"ids": {}, "uuids": {"sda1": ["94aefc52-4039-4b4a-8b2a-65124174d9b9"], "sdb": ["2021-09-23-22-20-32-00"]}, "labels": {"sda1": ["cloudimg-rootfs"], "sdb": ["cidata"]}, "masters": {}}, "ansible_uptime_seconds": 319, "ansible_lvm": {"lvs": {}, "vgs": {}, "pvs": {}}, "ansible_mounts": [{"mount": "/", "device": "/dev/sda1", "fstype": "ext4", "options": "rw,relatime", "size_total": 41567858688, 
"size_available": 40172617728, "block_size": 4096, "block_total": 10148403, "block_available": 9807768, "block_used": 340635, "inode_total": 5120000, "inode_available": 5047941, "inode_used": 72059, "uuid": "94aefc52-4039-4b4a-8b2a-65124174d9b9"}, {"mount": "/snap/core20/1081", "device": "/dev/loop0", "fstype": "squashfs", "options": "ro,nodev,relatime", "size_total": 64880640, "size_available": 0, "block_size": 131072, "block_total": 495, "block_available": 0, "block_used": 495, "inode_total": 11720, "inode_available": 0, "inode_used": 11720, "uuid": "N/A"}, {"mount": "/snap/lxd/21545", "device": "/dev/loop1", "fstype": "squashfs", "options": "ro,nodev,relatime", "size_total": 70516736, "size_available": 0, "block_size": 131072, "block_total": 538, "block_available": 0, "block_used": 538, "inode_total": 796, "inode_available": 0, "inode_used": 796, "uuid": "N/A"}, {"mount": "/snap/snapd/13170", "device": "/dev/loop2", "fstype": "squashfs", "options": "ro,nodev,relatime", "size_total": 33947648, "size_available": 0, "block_size": 131072, "block_total": 259, "block_available": 0, "block_used": 259, "inode_total": 474, "inode_available": 0, "inode_used": 474, "uuid": "N/A"}], "ansible_system_capabilities_enforced": "False", "ansible_system_capabilities": [], "ansible_ssh_host_key_dsa_public": "AAAAB3NzaC1kc3MAAACBANhTZfF+TbSqS3c3V0mYbiGOOHdB+wzCQlw73P/orjE3GaQQ3u3x4FA5RZcrfaBXKxgCatn4oTHbYN1vPhRECVvasTgZtyNP418lXaEsd4fJ7SLc0ju4BKaq8Pr5ppNSbcOEKru96ENnbHHrDi/XC7fKSpHijjGDhK86+HYs724/AAAAFQDGLonRqAKyuVczMoJLceaRShaCAwAAAIArdhTSkCW1LKS6II0NSonSQWeFb6f0X2kGVmfyvy9wFDgwdx5BkMEdaJBZrdIrROHQF+IPcK3jNfEAeJjfJ07YSFPiTQGzV6Y3clHchT/y6z0t0yHn+eP3w3OEjAPwRB4NAOdGsQdpg7Qs/sgSLjJ47EbLpf6P+ImfH7L8NMNB+wAAAIBIIpiFAEOa670brc4+aIJhObs3vuCKkmr9lOHBBqtMM/a8CQprG8PrbacZ3TRUBzBMTmWyFVSsniEpAhy1V7BHt7sgUBWu27A4fb1O94Vb1r7cXZ8BSKnT8TRmzdnxMdYPAiYhgQYF3uRfPB97AjBlDOaZGIug7quxtoM8+lg6YQ==", "ansible_ssh_host_key_dsa_public_keytype": "ssh-dss", "ansible_ssh_host_key_rsa_public": "AAAAB3NzaC1yc2EAAAADAQABAAABgQDYA8nuLROQD6tE5QAyK/7j+ewLYa+xOLFcDelQ2ack4gh8+2cM11oEvuz6QcL4xVKIOz308M8WK9x6dKGF2ujCaSEp2/DWXLbpCX/Rn6pB6OOeolq6Cop/OTE8M9tutCzyG1mr9UZZTkPks/j3bsIPPHjHNf/554BH4beG7zTlzARp4MABqkmTQ1XGGzZLxzRGKUUJaLo+6mxxf+3mcKKpzHTHiaGf20s57WXIbokJ1PJ4iwh/zdT93ebG7VwxxPcxDs+UwHcTJMnhXHpOwlXaBxUyawzOWGceWnpmlXs/aycqZ/5LbZf1BQp13WnDiLfuD1tzXCHq0Dl05cnMuOJhwSSb+13UZr1uxk7y8EUiYKKbOoamj7+8wvfi++3mGwHW1kokm1wWsu79EBxaVu7r0nTxs57JuCHFIUvDt63ajI0G4+U9Qhu7Kxi9NmNyoyv1JSPVXyv2dSZFQZwh6phzyyztirQSR4wbTzlEbmhKHESMUkhE0mgdMWajxzLUIXM=", "ansible_ssh_host_key_rsa_public_keytype": "ssh-rsa", "ansible_ssh_host_key_ecdsa_public": "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOrElzJrdktqNP+A5xDLWb3KLPYOjn9q/0v0+/15rBMNgTvkQXyVhbV/b8OxEpjo7evCk4XNM6Zg17N5AxdDX7k=", "ansible_ssh_host_key_ecdsa_public_keytype": "ecdsa-sha2-nistp256", "ansible_ssh_host_key_ed25519_public": "AAAAC3NzaC1lZDI1NTE5AAAAICHStriNsa0rVUQpwU7t4cmg+DNXSPVS+csFo7nbnlHx", "ansible_ssh_host_key_ed25519_public_keytype": "ssh-ed25519", "ansible_iscsi_iqn": "", "ansible_fips": false, "ansible_distribution": "Ubuntu", "ansible_distribution_release": "focal", "ansible_distribution_version": "20.04", "ansible_distribution_major_version": "20", "ansible_distribution_file_path": "/etc/os-release", "ansible_distribution_file_variety": "Debian", "ansible_distribution_file_parsed": true, "ansible_os_family": "Debian", "ansible_virtualization_role": "guest", "ansible_virtualization_type": "virtualbox", "ansible_virtualization_tech_guest": ["virtualbox"], 
"ansible_virtualization_tech_host": [], "ansible_lsb": {"id": "Ubuntu", "description": "Ubuntu 20.04.3 LTS", "release": "20.04", "codename": "focal", "major_release": "20"}, "ansible_cmdline": {"BOOT_IMAGE": "/boot/vmlinuz-5.4.0-86-generic", "root": "UUID=94aefc52-4039-4b4a-8b2a-65124174d9b9", "ro": true, "console": "ttyS0"}, "ansible_proc_cmdline": {"BOOT_IMAGE": "/boot/vmlinuz-5.4.0-86-generic", "root": "UUID=94aefc52-4039-4b4a-8b2a-65124174d9b9", "ro": true, "console": ["tty1", "ttyS0"]}, "ansible_hostnqn": "", "ansible_local": {}, "ansible_env": {"SUDO_GID": "1900", "MAIL": "/var/mail/root", "USER": "root", "HOME": "/root", "SUDO_UID": "1900", "LOGNAME": "root", "TERM": "unknown", "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin", "LANG": "C.UTF-8", "SUDO_COMMAND": "/bin/sh -c echo BECOME-SUCCESS-emwqvsoxlkrjzgibgvumxuqwnsbstafh ; python3", "SHELL": "/bin/bash", "SUDO_USER": "manager", "PWD": "/home/manager"}, "ansible_interfaces": ["enp0s8", "enp0s3", "lo"], "ansible_enp0s8": {"device": "enp0s8", "macaddress": "08:00:27:67:8d:69", "mtu": 1500, "active": true, "module": "e1000", "type": "ether", "pciid": "0000:00:08.0", "speed": 1000, "promisc": false, "ipv4": {"address": "10.1.1.201", "broadcast": "10.1.1.255", "netmask": "255.255.255.0", "network": "10.1.1.0"}, "ipv6": [{"address": "fe80::a00:27ff:fe67:8d69", "prefix": "64", "scope": "link"}], "features": {"rx_checksumming": "off", "tx_checksumming": "on", "tx_checksum_ipv4": "off [fixed]", "tx_checksum_ip_generic": "on", "tx_checksum_ipv6": "off [fixed]", "tx_checksum_fcoe_crc": "off [fixed]", "tx_checksum_sctp": "off [fixed]", "scatter_gather": "on", "tx_scatter_gather": "on", "tx_scatter_gather_fraglist": "off [fixed]", "tcp_segmentation_offload": "on", "tx_tcp_segmentation": "on", "tx_tcp_ecn_segmentation": "off [fixed]", "tx_tcp_mangleid_segmentation": "off", "tx_tcp6_segmentation": "off [fixed]", "generic_segmentation_offload": "on", "generic_receive_offload": "on", "large_receive_offload": "off [fixed]", "rx_vlan_offload": "on", "tx_vlan_offload": "on [fixed]", "ntuple_filters": "off [fixed]", "receive_hashing": "off [fixed]", "highdma": "off [fixed]", "rx_vlan_filter": "on [fixed]", "vlan_challenged": "off [fixed]", "tx_lockless": "off [fixed]", "netns_local": "off [fixed]", "tx_gso_robust": "off [fixed]", "tx_fcoe_segmentation": "off [fixed]", "tx_gre_segmentation": "off [fixed]", "tx_gre_csum_segmentation": "off [fixed]", "tx_ipxip4_segmentation": "off [fixed]", "tx_ipxip6_segmentation": "off [fixed]", "tx_udp_tnl_segmentation": "off [fixed]", "tx_udp_tnl_csum_segmentation": "off [fixed]", "tx_gso_partial": "off [fixed]", "tx_sctp_segmentation": "off [fixed]", "tx_esp_segmentation": "off [fixed]", "tx_udp_segmentation": "off [fixed]", "fcoe_mtu": "off [fixed]", "tx_nocache_copy": "off", "loopback": "off [fixed]", "rx_fcs": "off", "rx_all": "off", "tx_vlan_stag_hw_insert": "off [fixed]", "rx_vlan_stag_hw_parse": "off [fixed]", "rx_vlan_stag_filter": "off [fixed]", "l2_fwd_offload": "off [fixed]", "hw_tc_offload": "off [fixed]", "esp_hw_offload": "off [fixed]", "esp_tx_csum_hw_offload": "off [fixed]", "rx_udp_tunnel_port_offload": "off [fixed]", "tls_hw_tx_offload": "off [fixed]", "tls_hw_rx_offload": "off [fixed]", "rx_gro_hw": "off [fixed]", "tls_hw_record": "off [fixed]"}, "timestamping": ["tx_software", "rx_software", "software"], "hw_timestamp_filters": []}, "ansible_enp0s3": {"device": "enp0s3", "macaddress": "02:de:69:7f:c7:01", "mtu": 1500, "active": true, "module": "e1000", "type": 
"ether", "pciid": "0000:00:03.0", "speed": 1000, "promisc": false, "ipv4": {"address": "10.0.2.15", "broadcast": "10.0.2.255", "netmask": "255.255.255.0", "network": "10.0.2.0"}, "ipv6": [{"address": "fe80::de:69ff:fe7f:c701", "prefix": "64", "scope": "link"}], "features": {"rx_checksumming": "off", "tx_checksumming": "on", "tx_checksum_ipv4": "off [fixed]", "tx_checksum_ip_generic": "on", "tx_checksum_ipv6": "off [fixed]", "tx_checksum_fcoe_crc": "off [fixed]", "tx_checksum_sctp": "off [fixed]", "scatter_gather": "on", "tx_scatter_gather": "on", "tx_scatter_gather_fraglist": "off [fixed]", "tcp_segmentation_offload": "on", "tx_tcp_segmentation": "on", "tx_tcp_ecn_segmentation": "off [fixed]", "tx_tcp_mangleid_segmentation": "off", "tx_tcp6_segmentation": "off [fixed]", "generic_segmentation_offload": "on", "generic_receive_offload": "on", "large_receive_offload": "off [fixed]", "rx_vlan_offload": "on", "tx_vlan_offload": "on [fixed]", "ntuple_filters": "off [fixed]", "receive_hashing": "off [fixed]", "highdma": "off [fixed]", "rx_vlan_filter": "on [fixed]", "vlan_challenged": "off [fixed]", "tx_lockless": "off [fixed]", "netns_local": "off [fixed]", "tx_gso_robust": "off [fixed]", "tx_fcoe_segmentation": "off [fixed]", "tx_gre_segmentation": "off [fixed]", "tx_gre_csum_segmentation": "off [fixed]", "tx_ipxip4_segmentation": "off [fixed]", "tx_ipxip6_segmentation": "off [fixed]", "tx_udp_tnl_segmentation": "off [fixed]", "tx_udp_tnl_csum_segmentation": "off [fixed]", "tx_gso_partial": "off [fixed]", "tx_sctp_segmentation": "off [fixed]", "tx_esp_segmentation": "off [fixed]", "tx_udp_segmentation": "off [fixed]", "fcoe_mtu": "off [fixed]", "tx_nocache_copy": "off", "loopback": "off [fixed]", "rx_fcs": "off", "rx_all": "off", "tx_vlan_stag_hw_insert": "off [fixed]", "rx_vlan_stag_hw_parse": "off [fixed]", "rx_vlan_stag_filter": "off [fixed]", "l2_fwd_offload": "off [fixed]", "hw_tc_offload": "off [fixed]", "esp_hw_offload": "off [fixed]", "esp_tx_csum_hw_offload": "off [fixed]", "rx_udp_tunnel_port_offload": "off [fixed]", "tls_hw_tx_offload": "off [fixed]", "tls_hw_rx_offload": "off [fixed]", "rx_gro_hw": "off [fixed]", "tls_hw_record": "off [fixed]"}, "timestamping": ["tx_software", "rx_software", "software"], "hw_timestamp_filters": []}, "ansible_lo": {"device": "lo", "mtu": 65536, "active": true, "type": "loopback", "promisc": false, "ipv4": {"address": "127.0.0.1", "broadcast": "", "netmask": "255.0.0.0", "network": "127.0.0.0"}, "ipv6": [{"address": "::1", "prefix": "128", "scope": "host"}], "features": {"rx_checksumming": "on [fixed]", "tx_checksumming": "on", "tx_checksum_ipv4": "off [fixed]", "tx_checksum_ip_generic": "on [fixed]", "tx_checksum_ipv6": "off [fixed]", "tx_checksum_fcoe_crc": "off [fixed]", "tx_checksum_sctp": "on [fixed]", "scatter_gather": "on", "tx_scatter_gather": "on [fixed]", "tx_scatter_gather_fraglist": "on [fixed]", "tcp_segmentation_offload": "on", "tx_tcp_segmentation": "on", "tx_tcp_ecn_segmentation": "on", "tx_tcp_mangleid_segmentation": "on", "tx_tcp6_segmentation": "on", "generic_segmentation_offload": "on", "generic_receive_offload": "on", "large_receive_offload": "off [fixed]", "rx_vlan_offload": "off [fixed]", "tx_vlan_offload": "off [fixed]", "ntuple_filters": "off [fixed]", "receive_hashing": "off [fixed]", "highdma": "on [fixed]", "rx_vlan_filter": "off [fixed]", "vlan_challenged": "on [fixed]", "tx_lockless": "on [fixed]", "netns_local": "on [fixed]", "tx_gso_robust": "off [fixed]", "tx_fcoe_segmentation": "off [fixed]", "tx_gre_segmentation": 
"off [fixed]", "tx_gre_csum_segmentation": "off [fixed]", "tx_ipxip4_segmentation": "off [fixed]", "tx_ipxip6_segmentation": "off [fixed]", "tx_udp_tnl_segmentation": "off [fixed]", "tx_udp_tnl_csum_segmentation": "off [fixed]", "tx_gso_partial": "off [fixed]", "tx_sctp_segmentation": "on", "tx_esp_segmentation": "off [fixed]", "tx_udp_segmentation": "off [fixed]", "fcoe_mtu": "off [fixed]", "tx_nocache_copy": "off [fixed]", "loopback": "on [fixed]", "rx_fcs": "off [fixed]", "rx_all": "off [fixed]", "tx_vlan_stag_hw_insert": "off [fixed]", "rx_vlan_stag_hw_parse": "off [fixed]", "rx_vlan_stag_filter": "off [fixed]", "l2_fwd_offload": "off [fixed]", "hw_tc_offload": "off [fixed]", "esp_hw_offload": "off [fixed]", "esp_tx_csum_hw_offload": "off [fixed]", "rx_udp_tunnel_port_offload": "off [fixed]", "tls_hw_tx_offload": "off [fixed]", "tls_hw_rx_offload": "off [fixed]", "rx_gro_hw": "off [fixed]", "tls_hw_record": "off [fixed]"}, "timestamping": ["tx_software", "rx_software", "software"], "hw_timestamp_filters": []}, "ansible_default_ipv4": {"gateway": "10.0.2.2", "interface": "enp0s3", "address": "10.0.2.15", "broadcast": "10.0.2.255", "netmask": "255.255.255.0", "network": "10.0.2.0", "macaddress": "02:de:69:7f:c7:01", "mtu": 1500, "type": "ether", "alias": "enp0s3"}, "ansible_default_ipv6": {}, "ansible_all_ipv4_addresses": ["10.1.1.201", "10.0.2.15"], "ansible_all_ipv6_addresses": ["fe80::a00:27ff:fe67:8d69", "fe80::de:69ff:fe7f:c701"], "ansible_selinux_python_present": true, "ansible_selinux": {"status": "disabled"}, "ansible_apparmor": {"status": "enabled"}, "ansible_date_time": {"year": "2021", "month": "09", "weekday": "Thursday", "weekday_number": "4", "weeknumber": "39", "day": "30", "hour": "14", "minute": "39", "second": "09", "epoch": "1633012749", "date": "2021-09-30", "time": "14:39:09", "iso8601_micro": "2021-09-30T14:39:09.397030Z", "iso8601": "2021-09-30T14:39:09Z", "iso8601_basic": "20210930T143909397030", "iso8601_basic_short": "20210930T143909", "tz": "UTC", "tz_dst": "UTC", "tz_offset": "+0000"}, "ansible_service_mgr": "systemd", "ansible_pkg_mgr": "apt", "gather_subset": ["all"], "module_setup": true}, "invocation": {"module_args": {"gather_subset": ["all"], "gather_timeout": 10, "filter": [], "fact_path": "/etc/ansible/facts.d"}}}\n', b"Warning: Permanently added '10.1.1.201' (ECDSA) to the list of known hosts.\r\n")
ok: [test1]
META: ran handlers
TASK [debug] *******************************************************************
task path: /workspace/test.yml:5
Thursday 30 September 2021 14:39:19 +0000 (0:00:17.449) 0:00:17.494 ****
ok: [test1] => {
"msg": "Success"
}
META: ran handlers
META: ran handlers
PLAY RECAP *********************************************************************
test1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
Thursday 30 September 2021 14:39:19 +0000 (0:00:00.040) 0:00:17.534 ****
===============================================================================
Gathering Facts -------------------------------------------------------- 17.45s
/workspace/test.yml:2 ---------------------------------------------------------
debug ------------------------------------------------------------------- 0.04s
/workspace/test.yml:5 ---------------------------------------------------------
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75834
|
https://github.com/ansible/ansible/pull/76732
|
9142be2f6cabbe6597c9254c5bb9186d17036d55
|
0ff80a15ba40c2aff3b96c1152f19c97a92d3c97
| 2021-09-30T08:41:28Z |
python
| 2022-01-13T21:28:09Z |
test/units/plugins/connection/test_ssh.py
|
# -*- coding: utf-8 -*-
# (c) 2015, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from io import StringIO
import pytest
from ansible import constants as C
from ansible.errors import AnsibleAuthenticationFailure
from units.compat import unittest
from units.compat.mock import patch, MagicMock, PropertyMock
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.module_utils.compat.selectors import SelectorKey, EVENT_READ
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes
from ansible.playbook.play_context import PlayContext
from ansible.plugins.connection import ssh
from ansible.plugins.loader import connection_loader, become_loader
class TestConnectionBaseClass(unittest.TestCase):
def test_plugins_connection_ssh_module(self):
play_context = PlayContext()
play_context.prompt = (
'[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: '
)
in_stream = StringIO()
self.assertIsInstance(ssh.Connection(play_context, in_stream), ssh.Connection)
def test_plugins_connection_ssh_basic(self):
pc = PlayContext()
new_stdin = StringIO()
conn = ssh.Connection(pc, new_stdin)
# connect just returns self, so assert that
res = conn._connect()
self.assertEqual(conn, res)
ssh.SSHPASS_AVAILABLE = False
self.assertFalse(conn._sshpass_available())
ssh.SSHPASS_AVAILABLE = True
self.assertTrue(conn._sshpass_available())
with patch('subprocess.Popen') as p:
ssh.SSHPASS_AVAILABLE = None
p.return_value = MagicMock()
self.assertTrue(conn._sshpass_available())
ssh.SSHPASS_AVAILABLE = None
p.return_value = None
p.side_effect = OSError()
self.assertFalse(conn._sshpass_available())
conn.close()
self.assertFalse(conn._connected)
def test_plugins_connection_ssh__build_command(self):
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('ssh', pc, new_stdin)
conn.get_option = MagicMock()
conn.get_option.return_value = ""
conn._build_command('ssh', 'ssh')
def test_plugins_connection_ssh_exec_command(self):
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('ssh', pc, new_stdin)
conn._build_command = MagicMock()
conn._build_command.return_value = 'ssh something something'
conn._run = MagicMock()
conn._run.return_value = (0, 'stdout', 'stderr')
conn.get_option = MagicMock()
conn.get_option.return_value = True
res, stdout, stderr = conn.exec_command('ssh')
res, stdout, stderr = conn.exec_command('ssh', 'this is some data')
def test_plugins_connection_ssh__examine_output(self):
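        """Drive _examine_output with stub become-plugin checks and verify the
        per-category flags (prompt/success/error/missing password) it sets."""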
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('ssh', pc, new_stdin)
conn.set_become_plugin(become_loader.get('sudo'))
conn.become.check_password_prompt = MagicMock()
conn.become.check_success = MagicMock()
conn.become.check_incorrect_password = MagicMock()
conn.become.check_missing_password = MagicMock()
def _check_password_prompt(line):
if b'foo' in line:
return True
return False
def _check_become_success(line):
if b'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz' in line:
return True
return False
def _check_incorrect_password(line):
if b'incorrect password' in line:
return True
return False
def _check_missing_password(line):
if b'bad password' in line:
return True
return False
# test examining output for prompt
conn._flags = dict(
become_prompt=False,
become_success=False,
become_error=False,
become_nopasswd_error=False,
)
pc.prompt = True
# override become plugin
conn.become.prompt = True
conn.become.check_password_prompt = MagicMock(side_effect=_check_password_prompt)
conn.become.check_success = MagicMock(side_effect=_check_become_success)
conn.become.check_incorrect_password = MagicMock(side_effect=_check_incorrect_password)
conn.become.check_missing_password = MagicMock(side_effect=_check_missing_password)
def get_option(option):
if option == 'become_pass':
return 'password'
return None
conn.become.get_option = get_option
output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nfoo\nline 3\nthis should be the remainder', False)
self.assertEqual(output, b'line 1\nline 2\nline 3\n')
self.assertEqual(unprocessed, b'this should be the remainder')
self.assertTrue(conn._flags['become_prompt'])
self.assertFalse(conn._flags['become_success'])
self.assertFalse(conn._flags['become_error'])
self.assertFalse(conn._flags['become_nopasswd_error'])
        # test examining output for become success
conn._flags = dict(
become_prompt=False,
become_success=False,
become_error=False,
become_nopasswd_error=False,
)
pc.prompt = False
conn.become.prompt = False
pc.success_key = u'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz'
conn.become.success = u'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz'
output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nBECOME-SUCCESS-abcdefghijklmnopqrstuvxyz\nline 3\n', False)
self.assertEqual(output, b'line 1\nline 2\nline 3\n')
self.assertEqual(unprocessed, b'')
self.assertFalse(conn._flags['become_prompt'])
self.assertTrue(conn._flags['become_success'])
self.assertFalse(conn._flags['become_error'])
self.assertFalse(conn._flags['become_nopasswd_error'])
# test examining output for become failure
conn._flags = dict(
become_prompt=False,
become_success=False,
become_error=False,
become_nopasswd_error=False,
)
pc.prompt = False
conn.become.prompt = False
pc.success_key = None
output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nincorrect password\n', True)
self.assertEqual(output, b'line 1\nline 2\nincorrect password\n')
self.assertEqual(unprocessed, b'')
self.assertFalse(conn._flags['become_prompt'])
self.assertFalse(conn._flags['become_success'])
self.assertTrue(conn._flags['become_error'])
self.assertFalse(conn._flags['become_nopasswd_error'])
# test examining output for missing password
conn._flags = dict(
become_prompt=False,
become_success=False,
become_error=False,
become_nopasswd_error=False,
)
pc.prompt = False
conn.become.prompt = False
pc.success_key = None
output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nbad password\n', True)
self.assertEqual(output, b'line 1\nbad password\n')
self.assertEqual(unprocessed, b'')
self.assertFalse(conn._flags['become_prompt'])
self.assertFalse(conn._flags['become_success'])
self.assertFalse(conn._flags['become_error'])
self.assertTrue(conn._flags['become_nopasswd_error'])
@patch('time.sleep')
@patch('os.path.exists')
def test_plugins_connection_ssh_put_file(self, mock_ospe, mock_sleep):
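        """Exercise put_file across the 'smart' SFTP-then-SCP fallback, forced SCP,
        and forced SFTP paths, including the error and missing-file cases."""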
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('ssh', pc, new_stdin)
conn._build_command = MagicMock()
conn._bare_run = MagicMock()
mock_ospe.return_value = True
conn._build_command.return_value = 'some command to run'
conn._bare_run.return_value = (0, '', '')
conn.host = "some_host"
conn.set_option('reconnection_retries', 9)
        conn.set_option('ssh_transfer_method', None)  # scp_if_ssh is ignored unless ssh_transfer_method is None
# Test with SCP_IF_SSH set to smart
# Test when SFTP works
conn.set_option('scp_if_ssh', 'smart')
expected_in_data = b' '.join((b'put', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n'
conn.put_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
# Test when SFTP doesn't work but SCP does
conn._bare_run.side_effect = [(1, 'stdout', 'some errors'), (0, '', '')]
conn.put_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
conn._bare_run.side_effect = None
# test with SCP_IF_SSH enabled
conn.set_option('scp_if_ssh', True)
conn.put_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
conn.put_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
        # test with SCP_IF_SSH disabled
conn.set_option('scp_if_ssh', False)
expected_in_data = b' '.join((b'put', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n'
conn.put_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
expected_in_data = b' '.join((b'put',
to_bytes(shlex_quote('/path/to/in/file/with/unicode-fö〩')),
to_bytes(shlex_quote('/path/to/dest/file/with/unicode-fö〩')))) + b'\n'
conn.put_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
# test that a non-zero rc raises an error
conn._bare_run.return_value = (1, 'stdout', 'some errors')
self.assertRaises(AnsibleError, conn.put_file, '/path/to/bad/file', '/remote/path/to/file')
# test that a not-found path raises an error
mock_ospe.return_value = False
conn._bare_run.return_value = (0, 'stdout', '')
self.assertRaises(AnsibleFileNotFound, conn.put_file, '/path/to/bad/file', '/remote/path/to/file')
@patch('time.sleep')
def test_plugins_connection_ssh_fetch_file(self, mock_sleep):
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('ssh', pc, new_stdin)
conn._build_command = MagicMock()
conn._bare_run = MagicMock()
conn._load_name = 'ssh'
conn._build_command.return_value = 'some command to run'
conn._bare_run.return_value = (0, '', '')
conn.host = "some_host"
conn.set_option('reconnection_retries', 9)
        conn.set_option('ssh_transfer_method', None)  # scp_if_ssh is ignored unless ssh_transfer_method is None
# Test with SCP_IF_SSH set to smart
# Test when SFTP works
conn.set_option('scp_if_ssh', 'smart')
expected_in_data = b' '.join((b'get', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n'
conn.set_options({})
conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
# Test when SFTP doesn't work but SCP does
conn._bare_run.side_effect = [(1, 'stdout', 'some errors'), (0, '', '')]
conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
# test with SCP_IF_SSH enabled
conn._bare_run.side_effect = None
        conn.set_option('ssh_transfer_method', None)  # scp_if_ssh is ignored unless ssh_transfer_method is None
conn.set_option('scp_if_ssh', 'True')
conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
conn.fetch_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
# test with SCP_IF_SSH disabled
conn.set_option('scp_if_ssh', False)
expected_in_data = b' '.join((b'get', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n'
conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
expected_in_data = b' '.join((b'get',
to_bytes(shlex_quote('/path/to/in/file/with/unicode-fö〩')),
to_bytes(shlex_quote('/path/to/dest/file/with/unicode-fö〩')))) + b'\n'
conn.fetch_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
# test that a non-zero rc raises an error
conn._bare_run.return_value = (1, 'stdout', 'some errors')
self.assertRaises(AnsibleError, conn.fetch_file, '/path/to/bad/file', '/remote/path/to/file')
class MockSelector(object):
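    """Minimal stand-in for a selectors selector: register()/unregister() keep a
    running count of watched files, which get_map() reports back by default;
    select() results are scripted by the individual tests."""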
def __init__(self):
self.files_watched = 0
self.register = MagicMock(side_effect=self._register)
self.unregister = MagicMock(side_effect=self._unregister)
self.close = MagicMock()
self.get_map = MagicMock(side_effect=self._get_map)
self.select = MagicMock()
def _register(self, *args, **kwargs):
self.files_watched += 1
def _unregister(self, *args, **kwargs):
self.files_watched -= 1
def _get_map(self, *args, **kwargs):
return self.files_watched
@pytest.fixture
def mock_run_env(request, mocker):
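    """Build an ssh Connection wired to a mocked Popen, pty, and selector so the
    _run()/_bare_run() plumbing can be exercised without spawning processes."""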
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('ssh', pc, new_stdin)
conn.set_become_plugin(become_loader.get('sudo'))
conn._send_initial_data = MagicMock()
conn._examine_output = MagicMock()
conn._terminate_process = MagicMock()
conn._load_name = 'ssh'
conn.sshpass_pipe = [MagicMock(), MagicMock()]
request.cls.pc = pc
request.cls.conn = conn
mock_popen_res = MagicMock()
mock_popen_res.poll = MagicMock()
mock_popen_res.wait = MagicMock()
mock_popen_res.stdin = MagicMock()
mock_popen_res.stdin.fileno.return_value = 1000
mock_popen_res.stdout = MagicMock()
mock_popen_res.stdout.fileno.return_value = 1001
mock_popen_res.stderr = MagicMock()
mock_popen_res.stderr.fileno.return_value = 1002
mock_popen_res.returncode = 0
request.cls.mock_popen_res = mock_popen_res
mock_popen = mocker.patch('subprocess.Popen', return_value=mock_popen_res)
request.cls.mock_popen = mock_popen
request.cls.mock_selector = MockSelector()
mocker.patch('ansible.module_utils.compat.selectors.DefaultSelector', lambda: request.cls.mock_selector)
request.cls.mock_openpty = mocker.patch('pty.openpty')
mocker.patch('fcntl.fcntl')
mocker.patch('os.write')
mocker.patch('os.close')
@pytest.mark.usefixtures('mock_run_env')
class TestSSHConnectionRun(object):
    # FIXME:
    # These tests are little more than smoke tests. They need to be enhanced to
    # check that the relevant functions are being called and to give complete
    # coverage of the code paths.
def test_no_escalation(self):
self.mock_popen_res.stdout.read.side_effect = [b"my_stdout\n", b"second_line"]
self.mock_popen_res.stderr.read.side_effect = [b"my_stderr"]
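        # Script the selector: stdout is readable twice, then stderr once, then
        # nothing is ready and the run loop can finish.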
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run("ssh", "this is input data")
assert return_code == 0
assert b_stdout == b'my_stdout\nsecond_line'
assert b_stderr == b'my_stderr'
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is True
assert self.conn._send_initial_data.call_count == 1
assert self.conn._send_initial_data.call_args[0][1] == 'this is input data'
def test_with_password(self):
# test with a password set to trigger the sshpass write
self.pc.password = '12345'
self.mock_popen_res.stdout.read.side_effect = [b"some data", b"", b""]
self.mock_popen_res.stderr.read.side_effect = [b""]
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run(["ssh", "is", "a", "cmd"], "this is more data")
assert return_code == 0
assert b_stdout == b'some data'
assert b_stderr == b''
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is True
assert self.conn._send_initial_data.call_count == 1
assert self.conn._send_initial_data.call_args[0][1] == 'this is more data'
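    # Shared _examine_output stand-in: it flips the become flags the run loop
    # polls, so the prompt/escalation state machine advances without any real
    # output parsing.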
    def _password_with_prompt_examine_output(self, source, state, b_chunk, sudoable):
if state == 'awaiting_prompt':
self.conn._flags['become_prompt'] = True
elif state == 'awaiting_escalation':
self.conn._flags['become_success'] = True
return (b'', b'')
def test_password_with_prompt(self):
# test with password prompting enabled
self.pc.password = None
self.conn.become.prompt = b'Password:'
self.conn._examine_output.side_effect = self._password_with_prompt_examine_output
self.mock_popen_res.stdout.read.side_effect = [b"Password:", b"Success", b""]
self.mock_popen_res.stderr.read.side_effect = [b""]
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ),
(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run("ssh", "this is input data")
assert return_code == 0
assert b_stdout == b''
assert b_stderr == b''
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is True
assert self.conn._send_initial_data.call_count == 1
assert self.conn._send_initial_data.call_args[0][1] == 'this is input data'
def test_password_with_become(self):
# test with some become settings
self.pc.prompt = b'Password:'
self.conn.become.prompt = b'Password:'
self.pc.become = True
self.pc.success_key = 'BECOME-SUCCESS-abcdefg'
self.conn.become._id = 'abcdefg'
self.conn._examine_output.side_effect = self._password_with_prompt_examine_output
self.mock_popen_res.stdout.read.side_effect = [b"Password:", b"BECOME-SUCCESS-abcdefg", b"abc"]
self.mock_popen_res.stderr.read.side_effect = [b"123"]
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run("ssh", "this is input data")
self.mock_popen_res.stdin.flush.assert_called_once_with()
assert return_code == 0
assert b_stdout == b'abc'
assert b_stderr == b'123'
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is True
assert self.conn._send_initial_data.call_count == 1
assert self.conn._send_initial_data.call_args[0][1] == 'this is input data'
    def test_password_without_data(self):
        # simulate Popen failing when handed the newly opened ptys, forcing the fallback path
self.mock_popen.return_value = None
self.mock_popen.side_effect = [OSError(), self.mock_popen_res]
# simulate no data input
self.mock_openpty.return_value = (98, 99)
self.mock_popen_res.stdout.read.side_effect = [b"some data", b"", b""]
self.mock_popen_res.stderr.read.side_effect = [b""]
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run("ssh", "")
assert return_code == 0
assert b_stdout == b'some data'
assert b_stderr == b''
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is False
@pytest.mark.usefixtures('mock_run_env')
class TestSSHConnectionRetries(object):
def test_incorrect_password(self, monkeypatch):
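        # An authentication failure must abort immediately: the error message
        # advertises the skipped retries and Popen is invoked exactly once.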
self.conn.set_option('host_key_checking', False)
self.conn.set_option('reconnection_retries', 5)
monkeypatch.setattr('time.sleep', lambda x: None)
self.mock_popen_res.stdout.read.side_effect = [b'']
self.mock_popen_res.stderr.read.side_effect = [b'Permission denied, please try again.\r\n']
type(self.mock_popen_res).returncode = PropertyMock(side_effect=[5] * 4)
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[],
]
self.mock_selector.get_map.side_effect = lambda: True
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = [b'sshpass', b'-d41', b'ssh', b'-C']
exception_info = pytest.raises(AnsibleAuthenticationFailure, self.conn.exec_command, 'sshpass', 'some data')
assert exception_info.value.message == ('Invalid/incorrect username/password. Skipping remaining 5 retries to prevent account lockout: '
'Permission denied, please try again.')
assert self.mock_popen.call_count == 1
def test_retry_then_success(self, monkeypatch):
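        # The first attempt exits 255; the retry loop silently reruns the
        # command and the caller only sees the successful result.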
self.conn.set_option('host_key_checking', False)
self.conn.set_option('reconnection_retries', 3)
monkeypatch.setattr('time.sleep', lambda x: None)
self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"]
self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"]
type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 3 + [0] * 4)
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[]
]
self.mock_selector.get_map.side_effect = lambda: True
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = 'ssh'
return_code, b_stdout, b_stderr = self.conn.exec_command('ssh', 'some data')
assert return_code == 0
assert b_stdout == b'my_stdout\nsecond_line'
assert b_stderr == b'my_stderr'
def test_multiple_failures(self, monkeypatch):
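        # Every attempt exits 255; after the 9 retries are exhausted (10 Popen
        # calls in total) the plugin raises AnsibleConnectionFailure.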
self.conn.set_option('host_key_checking', False)
self.conn.set_option('reconnection_retries', 9)
monkeypatch.setattr('time.sleep', lambda x: None)
self.mock_popen_res.stdout.read.side_effect = [b""] * 10
self.mock_popen_res.stderr.read.side_effect = [b""] * 10
type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 30)
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[],
] * 10
self.mock_selector.get_map.side_effect = lambda: True
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = 'ssh'
pytest.raises(AnsibleConnectionFailure, self.conn.exec_command, 'ssh', 'some data')
assert self.mock_popen.call_count == 10
    def test_arbitrary_exceptions(self, monkeypatch):
self.conn.set_option('host_key_checking', False)
self.conn.set_option('reconnection_retries', 9)
monkeypatch.setattr('time.sleep', lambda x: None)
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = 'ssh'
self.mock_popen.side_effect = [Exception('bad')] * 10
pytest.raises(Exception, self.conn.exec_command, 'ssh', 'some data')
assert self.mock_popen.call_count == 10
def test_put_file_retries(self, monkeypatch):
self.conn.set_option('host_key_checking', False)
self.conn.set_option('reconnection_retries', 3)
monkeypatch.setattr('time.sleep', lambda x: None)
monkeypatch.setattr('ansible.plugins.connection.ssh.os.path.exists', lambda x: True)
self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"]
self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"]
type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 4 + [0] * 4)
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[]
]
self.mock_selector.get_map.side_effect = lambda: True
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = 'sftp'
return_code, b_stdout, b_stderr = self.conn.put_file('/path/to/in/file', '/path/to/dest/file')
assert return_code == 0
assert b_stdout == b"my_stdout\nsecond_line"
assert b_stderr == b"my_stderr"
assert self.mock_popen.call_count == 2
def test_fetch_file_retries(self, monkeypatch):
self.conn.set_option('host_key_checking', False)
self.conn.set_option('reconnection_retries', 3)
monkeypatch.setattr('time.sleep', lambda x: None)
monkeypatch.setattr('ansible.plugins.connection.ssh.os.path.exists', lambda x: True)
self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"]
self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"]
type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 4 + [0] * 4)
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[]
]
self.mock_selector.get_map.side_effect = lambda: True
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = 'sftp'
return_code, b_stdout, b_stderr = self.conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
assert return_code == 0
assert b_stdout == b"my_stdout\nsecond_line"
assert b_stderr == b"my_stderr"
assert self.mock_popen.call_count == 2
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 74,417 |
reboot fails after a long time with Ubuntu when verbosity set to connection debug
|
### Summary
When I perform a reboot on an Ubuntu 18.04 or 20.04 remote host with verbosity set to connection debug (-vvvv), the reboot fails after a long time (several hours, far more than reboot_timeout). If I set verbosity to debug (-vvv), it works as expected.
Tested with a CentOS 7 remote host, which is not affected by this bug.
This bug can be reproduced on the devel branch but was discovered on version 2.10.
### Issue Type
Bug Report
### Component Name
reboot
### Ansible Version
```console
$ ansible --version
ansible 2.10.7
```
### Configuration
```console
$ ansible-config dump --only-changed
ANSIBLE_SSH_ARGS(ansible.cfg) = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null
HOST_KEY_CHECKING(ansible.cfg) = False
```
### OS / Environment
Control Node: Ubuntu 20.04
Remote node: Ubuntu 20.04 and 18.04
### Steps to Reproduce
```yaml
- name: "Dumb playbook"
hosts:
- My-Remote-Host
gather_facts: false
tasks:
- name: Reboot
reboot:
become: True
become_method: sudo
```
### Expected Results
```
<X.X.X.X> SSH: EXEC sshpass -d12 ssh -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o Port=22 -o 'User="host-user"' -o ConnectTimeout=10 -o ControlPath=~/.ansible/cp/02ec46349a -tt X.X.X.X '/bin/sh -c '"'"'sudo -H -S -p "[sudo via ansible, key=tzyslkziuuaepfcbrlaamnnnwqdftlts] password:" -u root /bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-tzyslkziuuaepfcbrlaamnnnwqdftlts ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
<X.X.X.X> (0, b'\r\n9eba9e53-7bd2-4178-8d58-65f090990e83\r\n', b"Warning: Permanently added 'X.X.X.X' (ECDSA) to the list of known hosts.\r\nShared connection to X.X.X.X closed.\r\n")
reboot: attempting post-reboot test command
<X.X.X.X> ESTABLISH SSH CONNECTION FOR USER: host-user
<X.X.X.X> SSH: EXEC sshpass -d12 ssh -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o Port=22 -o 'User="host-user"' -o ConnectTimeout=10 -o ControlPath=~/.ansible/cp/02ec46349a -tt X.X.X.X '/bin/sh -c '"'"'sudo -H -S -p "[sudo via ansible, key=mqdzbdqgyzzzqykxxumonzhvcptnfnqa] password:" -u root /bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-mqdzbdqgyzzzqykxxumonzhvcptnfnqa ; whoami'"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
<X.X.X.X> (0, b'\r\nroot\r\n', b'Shared connection to X.X.X.X closed.\r\n')
reboot: system successfully rebooted
<X.X.X.X> ESTABLISH SSH CONNECTION FOR USER: host-user
<X.X.X.X> SSH: EXEC sshpass -d12 ssh -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o Port=22 -o 'User="host-user"' -o ConnectTimeout=10 -o ControlPath=~/.ansible/cp/02ec46349a X.X.X.X '/bin/sh -c '"'"'rm -f -r /home/host-user/.ansible/tmp/ansible-tmp-1619445760.4098256-1763116-260472738004687/ > /dev/null 2>&1 && sleep 0'"'"''
<X.X.X.X> (0, b'', b'')
changed: [My-Remote-Host] => {
"changed": true,
"elapsed": 21,
"rebooted": true
}
META: ran handlers
META: ran handlers
```
### Actual Results
```console
<X.X.X.X> SSH: EXEC sshpass -d10 ssh -vvv -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o Port=22 -o 'User="host-user"' -o ConnectTimeout=10 -o ControlPath=~/.ansible/cp/02ec46349a -tt X.X.X.X '/bin/sh -c '"'"'sudo -H -S -p "[sudo via ansible, key=ptpwkfkjmbkbufkguzcinvxkirsrvysf] password:" -u root /bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-ptpwkfkjmbkbufkguzcinvxkirsrvysf ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"' && sleep 0'"'"''
<X.X.X.X> (255, b'', b'OpenSSH_8.2p1 Ubuntu-4ubuntu0.1, OpenSSL 1.1.1f 31 Mar 2020\r\ndebug1: Reading configuration data /etc/ssh/ssh_config\r\ndebug1: /etc/ssh/ssh_config line 19: include /etc/ssh/ssh_config.d/*.conf matched no files\r\ndebug1: /etc/ssh/ssh_config line 21: Applying options for *\r\ndebug2: resolve_canonicalize: hostname X.X.X.X is address\r\ndebug1: auto-mux: Trying existing master\r\ndebug1: Control socket "~/.ansible/cp/02ec46349a" does not exist\r\ndebug2: ssh_connect_direct\r\ndebug1: Connecting to X.X.X.X [X.X.X.X] port 22.\r\ndebug2: fd 3 setting O_NONBLOCK\r\ndebug1: connect to address X.X.X.X port 22: Connection refused\r\nssh: connect to host X.X.X.X port 22: Connection refused\r\n')
sending connection check: [b'sshpass', b'-d10', b'ssh', b'-vvv', b'-o', b'ControlMaster=auto', b'-o', b'ControlPersist=60s', b'-o', b'UserKnownHostsFile=/dev/null', b'-o', b'StrictHostKeyChecking=no', b'-o', b'Port=22', b'-o', b'User="host-user"', b'-o', b'ConnectTimeout=10', b'-o', b'ControlPath=~/.ansible/cp/02ec46349a', b'-O', b'check', b'X.X.X.X']
No connection to reset: OpenSSH_8.2p1 Ubuntu-4ubuntu0.1, OpenSSL 1.1.1f 31 Mar 2020
debug1: Reading configuration data /etc/ssh/ssh_config
debug1: /etc/ssh/ssh_config line 19: include /etc/ssh/ssh_config.d/*.conf matched no files
debug1: /etc/ssh/ssh_config line 21: Applying options for *
debug2: resolve_canonicalize: hostname X.X.X.X is address
debug1: auto-mux: Trying existing master
Control socket connect(~/.ansible/cp/02ec46349a): No such file or directory
reboot: attempting to get system boot time
<X.X.X.X> ESTABLISH SSH CONNECTION FOR USER: host-user
<X.X.X.X> SSH: EXEC sshpass -d12 ssh -vvv -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o Port=22 -o 'User="host-user"' -o ConnectTimeout=10 -o ControlPath=~/.ansible/cp/02ec46349a -tt X.X.X.X '/bin/sh -c '"'"'sudo -H -S -p "[sudo via ansible, key=bpysdtsdgvijqvsmtxxbnykpmxgrhqho] password:" -u root /bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-bpysdtsdgvijqvsmtxxbnykpmxgrhqho ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
```
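The `debug1:`/`debug2:` lines above are the OpenSSH client's own stderr chatter, which the connection plugin ends up inspecting as if it were remote output. As a rough illustration of the idea — this is my own sketch with an assumed helper name and pattern, not the fix that actually shipped — such lines could be stripped before any parsing:

```python
import re

# OpenSSH prints its -v/-vv/-vvv diagnostics as "debugN: ..." lines on stderr.
# They describe the client's own state, not the remote command, so they should
# be dropped before output is scanned for prompts or error strings.
SSH_DEBUG_LINE = re.compile(rb'^debug[1-3]: ')


def strip_ssh_debug(b_output):
    """Return b_output with OpenSSH client debug lines removed (illustrative)."""
    return b'\n'.join(
        line for line in b_output.splitlines()
        if not SSH_DEBUG_LINE.match(line)
    )
```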
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/74417
|
https://github.com/ansible/ansible/pull/76732
|
9142be2f6cabbe6597c9254c5bb9186d17036d55
|
0ff80a15ba40c2aff3b96c1152f19c97a92d3c97
| 2021-04-26T14:23:59Z |
python
| 2022-01-13T21:28:09Z |
changelogs/fragments/ssh_debug_noparse.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 74,417 |
reboot fails after a long time with Ubuntu when verbosity set to connection debug
|
### Summary
When I perform a reboot on an Ubuntu 18.04 or 20.04 remote host with verbosity set to connection debug (-vvvv), the reboot fails after a long time (several hours, far more than reboot_timeout). If I set verbosity to debug (-vvv), it works as expected.
Tested with a CentOS 7 remote host, which is not affected by this bug.
This bug can be reproduced on the devel branch but was discovered on version 2.10.
### Issue Type
Bug Report
### Component Name
reboot
### Ansible Version
```console
$ ansible --version
ansible 2.10.7
```
### Configuration
```console
$ ansible-config dump --only-changed
ANSIBLE_SSH_ARGS(ansible.cfg) = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null
HOST_KEY_CHECKING(ansible.cfg) = False
```
### OS / Environment
Control Node: Ubuntu 20.04
Remote node: Ubuntu 20.04 and 18.04
### Steps to Reproduce
```yaml
- name: "Dumb playbook"
hosts:
- My-Remote-Host
gather_facts: false
tasks:
- name: Reboot
reboot:
become: True
become_method: sudo
```
### Expected Results
```
<X.X.X.X> SSH: EXEC sshpass -d12 ssh -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o Port=22 -o 'User="host-user"' -o ConnectTimeout=10 -o ControlPath=~/.ansible/cp/02ec46349a -tt X.X.X.X '/bin/sh -c '"'"'sudo -H -S -p "[sudo via ansible, key=tzyslkziuuaepfcbrlaamnnnwqdftlts] password:" -u root /bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-tzyslkziuuaepfcbrlaamnnnwqdftlts ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
<X.X.X.X> (0, b'\r\n9eba9e53-7bd2-4178-8d58-65f090990e83\r\n', b"Warning: Permanently added 'X.X.X.X' (ECDSA) to the list of known hosts.\r\nShared connection to X.X.X.X closed.\r\n")
reboot: attempting post-reboot test command
<X.X.X.X> ESTABLISH SSH CONNECTION FOR USER: host-user
<X.X.X.X> SSH: EXEC sshpass -d12 ssh -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o Port=22 -o 'User="host-user"' -o ConnectTimeout=10 -o ControlPath=~/.ansible/cp/02ec46349a -tt X.X.X.X '/bin/sh -c '"'"'sudo -H -S -p "[sudo via ansible, key=mqdzbdqgyzzzqykxxumonzhvcptnfnqa] password:" -u root /bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-mqdzbdqgyzzzqykxxumonzhvcptnfnqa ; whoami'"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
<X.X.X.X> (0, b'\r\nroot\r\n', b'Shared connection to X.X.X.X closed.\r\n')
reboot: system successfully rebooted
<X.X.X.X> ESTABLISH SSH CONNECTION FOR USER: host-user
<X.X.X.X> SSH: EXEC sshpass -d12 ssh -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o Port=22 -o 'User="host-user"' -o ConnectTimeout=10 -o ControlPath=~/.ansible/cp/02ec46349a X.X.X.X '/bin/sh -c '"'"'rm -f -r /home/host-user/.ansible/tmp/ansible-tmp-1619445760.4098256-1763116-260472738004687/ > /dev/null 2>&1 && sleep 0'"'"''
<X.X.X.X> (0, b'', b'')
changed: [My-Remote-Host] => {
"changed": true,
"elapsed": 21,
"rebooted": true
}
META: ran handlers
META: ran handlers
```
### Actual Results
```console
<X.X.X.X> SSH: EXEC sshpass -d10 ssh -vvv -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o Port=22 -o 'User="host-user"' -o ConnectTimeout=10 -o ControlPath=~/.ansible/cp/02ec46349a -tt X.X.X.X '/bin/sh -c '"'"'sudo -H -S -p "[sudo via ansible, key=ptpwkfkjmbkbufkguzcinvxkirsrvysf] password:" -u root /bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-ptpwkfkjmbkbufkguzcinvxkirsrvysf ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"' && sleep 0'"'"''
<X.X.X.X> (255, b'', b'OpenSSH_8.2p1 Ubuntu-4ubuntu0.1, OpenSSL 1.1.1f 31 Mar 2020\r\ndebug1: Reading configuration data /etc/ssh/ssh_config\r\ndebug1: /etc/ssh/ssh_config line 19: include /etc/ssh/ssh_config.d/*.conf matched no files\r\ndebug1: /etc/ssh/ssh_config line 21: Applying options for *\r\ndebug2: resolve_canonicalize: hostname X.X.X.X is address\r\ndebug1: auto-mux: Trying existing master\r\ndebug1: Control socket "~/.ansible/cp/02ec46349a" does not exist\r\ndebug2: ssh_connect_direct\r\ndebug1: Connecting to X.X.X.X [X.X.X.X] port 22.\r\ndebug2: fd 3 setting O_NONBLOCK\r\ndebug1: connect to address X.X.X.X port 22: Connection refused\r\nssh: connect to host X.X.X.X port 22: Connection refused\r\n')
sending connection check: [b'sshpass', b'-d10', b'ssh', b'-vvv', b'-o', b'ControlMaster=auto', b'-o', b'ControlPersist=60s', b'-o', b'UserKnownHostsFile=/dev/null', b'-o', b'StrictHostKeyChecking=no', b'-o', b'Port=22', b'-o', b'User="host-user"', b'-o', b'ConnectTimeout=10', b'-o', b'ControlPath=~/.ansible/cp/02ec46349a', b'-O', b'check', b'X.X.X.X']
No connection to reset: OpenSSH_8.2p1 Ubuntu-4ubuntu0.1, OpenSSL 1.1.1f 31 Mar 2020
debug1: Reading configuration data /etc/ssh/ssh_config
debug1: /etc/ssh/ssh_config line 19: include /etc/ssh/ssh_config.d/*.conf matched no files
debug1: /etc/ssh/ssh_config line 21: Applying options for *
debug2: resolve_canonicalize: hostname X.X.X.X is address
debug1: auto-mux: Trying existing master
Control socket connect(~/.ansible/cp/02ec46349a): No such file or directory
reboot: attempting to get system boot time
<X.X.X.X> ESTABLISH SSH CONNECTION FOR USER: host-user
<X.X.X.X> SSH: EXEC sshpass -d12 ssh -vvv -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o Port=22 -o 'User="host-user"' -o ConnectTimeout=10 -o ControlPath=~/.ansible/cp/02ec46349a -tt X.X.X.X '/bin/sh -c '"'"'sudo -H -S -p "[sudo via ansible, key=bpysdtsdgvijqvsmtxxbnykpmxgrhqho] password:" -u root /bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-bpysdtsdgvijqvsmtxxbnykpmxgrhqho ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/74417
|
https://github.com/ansible/ansible/pull/76732
|
9142be2f6cabbe6597c9254c5bb9186d17036d55
|
0ff80a15ba40c2aff3b96c1152f19c97a92d3c97
| 2021-04-26T14:23:59Z |
python
| 2022-01-13T21:28:09Z |
lib/ansible/plugins/connection/ssh.py
|
# Copyright (c) 2012, Michael DeHaan <[email protected]>
# Copyright 2015 Abhijit Menon-Sen <[email protected]>
# Copyright 2017 Toshio Kuratomi <[email protected]>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: ssh
short_description: connect via SSH client binary
description:
- This connection plugin allows Ansible to communicate to the target machines through normal SSH command line.
- Ansible does not expose a channel to allow communication between the user and the SSH process to accept
a password manually to decrypt an SSH key when using this connection plugin (which is the default). The
use of C(ssh-agent) is highly recommended.
author: ansible (@core)
extends_documentation_fragment:
- connection_pipelining
version_added: historical
notes:
- Many options default to C(None) here but that only means we do not override the SSH tool's defaults and/or configuration.
For example, if you specify the port in this plugin it will override any C(Port) entry in your C(.ssh/config).
options:
host:
description: Hostname/IP to connect to.
vars:
- name: inventory_hostname
- name: ansible_host
- name: ansible_ssh_host
- name: delegated_vars['ansible_host']
- name: delegated_vars['ansible_ssh_host']
host_key_checking:
description: Determines if SSH should check host keys.
default: True
type: boolean
ini:
- section: defaults
key: 'host_key_checking'
- section: ssh_connection
key: 'host_key_checking'
version_added: '2.5'
env:
- name: ANSIBLE_HOST_KEY_CHECKING
- name: ANSIBLE_SSH_HOST_KEY_CHECKING
version_added: '2.5'
vars:
- name: ansible_host_key_checking
version_added: '2.5'
- name: ansible_ssh_host_key_checking
version_added: '2.5'
password:
description: Authentication password for the C(remote_user). Can be supplied as CLI option.
vars:
- name: ansible_password
- name: ansible_ssh_pass
- name: ansible_ssh_password
sshpass_prompt:
description:
- Password prompt that sshpass should search for. Supported by sshpass 1.06 and up.
- Defaults to C(Enter PIN for) when pkcs11_provider is set.
default: ''
ini:
- section: 'ssh_connection'
key: 'sshpass_prompt'
env:
- name: ANSIBLE_SSHPASS_PROMPT
vars:
- name: ansible_sshpass_prompt
version_added: '2.10'
ssh_args:
description: Arguments to pass to all SSH CLI tools.
default: '-C -o ControlMaster=auto -o ControlPersist=60s'
ini:
- section: 'ssh_connection'
key: 'ssh_args'
env:
- name: ANSIBLE_SSH_ARGS
vars:
- name: ansible_ssh_args
version_added: '2.7'
ssh_common_args:
description: Common extra args for all SSH CLI tools.
ini:
- section: 'ssh_connection'
key: 'ssh_common_args'
version_added: '2.7'
env:
- name: ANSIBLE_SSH_COMMON_ARGS
version_added: '2.7'
vars:
- name: ansible_ssh_common_args
cli:
- name: ssh_common_args
default: ''
ssh_executable:
default: ssh
description:
- This defines the location of the SSH binary. It defaults to C(ssh) which will use the first SSH binary available in $PATH.
- This option is usually not required, it might be useful when access to system SSH is restricted,
or when using SSH wrappers to connect to remote hosts.
env: [{name: ANSIBLE_SSH_EXECUTABLE}]
ini:
- {key: ssh_executable, section: ssh_connection}
#const: ANSIBLE_SSH_EXECUTABLE
version_added: "2.2"
vars:
- name: ansible_ssh_executable
version_added: '2.7'
sftp_executable:
default: sftp
description:
- This defines the location of the sftp binary. It defaults to C(sftp) which will use the first binary available in $PATH.
env: [{name: ANSIBLE_SFTP_EXECUTABLE}]
ini:
- {key: sftp_executable, section: ssh_connection}
version_added: "2.6"
vars:
- name: ansible_sftp_executable
version_added: '2.7'
scp_executable:
default: scp
description:
- This defines the location of the scp binary. It defaults to C(scp) which will use the first binary available in $PATH.
env: [{name: ANSIBLE_SCP_EXECUTABLE}]
ini:
- {key: scp_executable, section: ssh_connection}
version_added: "2.6"
vars:
- name: ansible_scp_executable
version_added: '2.7'
scp_extra_args:
          description: Extra arguments exclusive to the C(scp) CLI.
vars:
- name: ansible_scp_extra_args
env:
- name: ANSIBLE_SCP_EXTRA_ARGS
version_added: '2.7'
ini:
- key: scp_extra_args
section: ssh_connection
version_added: '2.7'
cli:
- name: scp_extra_args
default: ''
sftp_extra_args:
          description: Extra arguments exclusive to the C(sftp) CLI.
vars:
- name: ansible_sftp_extra_args
env:
- name: ANSIBLE_SFTP_EXTRA_ARGS
version_added: '2.7'
ini:
- key: sftp_extra_args
section: ssh_connection
version_added: '2.7'
cli:
- name: sftp_extra_args
default: ''
ssh_extra_args:
          description: Extra arguments exclusive to the SSH CLI.
vars:
- name: ansible_ssh_extra_args
env:
- name: ANSIBLE_SSH_EXTRA_ARGS
version_added: '2.7'
ini:
- key: ssh_extra_args
section: ssh_connection
version_added: '2.7'
cli:
- name: ssh_extra_args
default: ''
reconnection_retries:
description: Number of attempts to connect.
default: 0
type: integer
env:
- name: ANSIBLE_SSH_RETRIES
ini:
- section: connection
key: retries
- section: ssh_connection
key: retries
vars:
- name: ansible_ssh_retries
version_added: '2.7'
port:
description: Remote port to connect to.
type: int
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_port
- name: ansible_ssh_port
remote_user:
description:
- User name with which to login to the remote server, normally set by the remote_user keyword.
              - If no user is supplied, Ansible will let the SSH client binary choose the user as it normally would.
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
- name: ansible_ssh_user
cli:
- name: user
pipelining:
env:
- name: ANSIBLE_PIPELINING
- name: ANSIBLE_SSH_PIPELINING
ini:
- section: defaults
key: pipelining
- section: connection
key: pipelining
- section: ssh_connection
key: pipelining
vars:
- name: ansible_pipelining
- name: ansible_ssh_pipelining
private_key_file:
description:
- Path to private key file to use for authentication.
ini:
- section: defaults
key: private_key_file
env:
- name: ANSIBLE_PRIVATE_KEY_FILE
vars:
- name: ansible_private_key_file
- name: ansible_ssh_private_key_file
cli:
- name: private_key_file
option: '--private-key'
control_path:
description:
              - This is the location to save SSH's ControlPath sockets; it uses SSH's variable substitution.
- Since 2.3, if null (default), ansible will generate a unique hash. Use ``%(directory)s`` to indicate where to use the control dir path setting.
- Before 2.3 it defaulted to ``control_path=%(directory)s/ansible-ssh-%%h-%%p-%%r``.
- Be aware that this setting is ignored if C(-o ControlPath) is set in ssh args.
env:
- name: ANSIBLE_SSH_CONTROL_PATH
ini:
- key: control_path
section: ssh_connection
vars:
- name: ansible_control_path
version_added: '2.7'
control_path_dir:
default: ~/.ansible/cp
description:
- This sets the directory to use for ssh control path if the control path setting is null.
- Also, provides the ``%(directory)s`` variable for the control path setting.
env:
- name: ANSIBLE_SSH_CONTROL_PATH_DIR
ini:
- section: ssh_connection
key: control_path_dir
vars:
- name: ansible_control_path_dir
version_added: '2.7'
sftp_batch_mode:
default: 'yes'
          description: Determines whether C(sftp) runs in batch mode, which allows failed transfers to be detected reliably.
env: [{name: ANSIBLE_SFTP_BATCH_MODE}]
ini:
- {key: sftp_batch_mode, section: ssh_connection}
type: bool
vars:
- name: ansible_sftp_batch_mode
version_added: '2.7'
ssh_transfer_method:
description:
- "Preferred method to use when transferring files over ssh"
- Setting to 'smart' (default) will try them in order, until one succeeds or they all fail
- Using 'piped' creates an ssh pipe with C(dd) on either side to copy the data
choices: ['sftp', 'scp', 'piped', 'smart']
env: [{name: ANSIBLE_SSH_TRANSFER_METHOD}]
ini:
- {key: transfer_method, section: ssh_connection}
vars:
- name: ansible_ssh_transfer_method
version_added: '2.12'
scp_if_ssh:
deprecated:
why: In favor of the "ssh_transfer_method" option.
version: "2.17"
alternatives: ssh_transfer_method
default: smart
description:
- "Preferred method to use when transfering files over SSH."
- When set to I(smart), Ansible will try them until one succeeds or they all fail.
- If set to I(True), it will force 'scp', if I(False) it will use 'sftp'.
- This setting will overridden by ssh_transfer_method if set.
env: [{name: ANSIBLE_SCP_IF_SSH}]
ini:
- {key: scp_if_ssh, section: ssh_connection}
vars:
- name: ansible_scp_if_ssh
version_added: '2.7'
use_tty:
version_added: '2.5'
default: 'yes'
          description: Add C(-tt) to ssh commands to force tty allocation.
env: [{name: ANSIBLE_SSH_USETTY}]
ini:
- {key: usetty, section: ssh_connection}
type: bool
vars:
- name: ansible_ssh_use_tty
version_added: '2.7'
timeout:
default: 10
description:
              - This is the default amount of time we will wait while establishing an SSH connection.
              - It also controls how long we can wait when reading from the connection once it is established (the select timeout on the socket).
env:
- name: ANSIBLE_TIMEOUT
- name: ANSIBLE_SSH_TIMEOUT
version_added: '2.11'
ini:
- key: timeout
section: defaults
- key: timeout
section: ssh_connection
version_added: '2.11'
vars:
- name: ansible_ssh_timeout
version_added: '2.11'
cli:
- name: timeout
type: integer
pkcs11_provider:
version_added: '2.12'
default: ""
description:
- "PKCS11 SmartCard provider such as opensc, example: /usr/local/lib/opensc-pkcs11.so"
- Requires sshpass version 1.06+, sshpass must support the -P option.
env: [{name: ANSIBLE_PKCS11_PROVIDER}]
ini:
- {key: pkcs11_provider, section: ssh_connection}
vars:
- name: ansible_ssh_pkcs11_provider
'''
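# A minimal usage sketch (illustrative host and values, not part of this plugin):
# the options documented above are usually supplied via ansible.cfg or inventory,
# for example:
#
#     [ssh_connection]                 ; ansible.cfg
#     ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s
#     retries = 3
#
#     web1 ansible_host=192.0.2.10 ansible_user=deploy ansible_port=2222   ; inventory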
import errno
import fcntl
import hashlib
import os
import pty
import re
import shlex
import subprocess
import time
from functools import wraps
from ansible.errors import (
AnsibleAuthenticationFailure,
AnsibleConnectionFailure,
AnsibleError,
AnsibleFileNotFound,
)
from ansible.errors import AnsibleOptionsError
from ansible.module_utils.compat import selectors
from ansible.module_utils.six import PY3, text_type, binary_type
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.parsing.convert_bool import BOOLEANS, boolean
from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.plugins.shell.powershell import _parse_clixml
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath, makedirs_safe
display = Display()
b_NOT_SSH_ERRORS = (b'Traceback (most recent call last):', # Python-2.6 when there's an exception
# while invoking a script via -m
                    b'PHP Parse error:',  # PHP always returns error 255
)
SSHPASS_AVAILABLE = None
class AnsibleControlPersistBrokenPipeError(AnsibleError):
''' ControlPersist broken pipe '''
pass
def _handle_error(remaining_retries, command, return_tuple, no_log, host, display=display):
# sshpass errors
if command == b'sshpass':
# Error 5 is invalid/incorrect password. Raise an exception to prevent retries from locking the account.
if return_tuple[0] == 5:
msg = 'Invalid/incorrect username/password. Skipping remaining {0} retries to prevent account lockout:'.format(remaining_retries)
if remaining_retries <= 0:
msg = 'Invalid/incorrect password:'
if no_log:
msg = '{0} <error censored due to no log>'.format(msg)
else:
msg = '{0} {1}'.format(msg, to_native(return_tuple[2]).rstrip())
raise AnsibleAuthenticationFailure(msg)
    # sshpass return codes are 1-6. We handled 5 above, so this catches the other scenarios.
# No exception is raised, so the connection is retried - except when attempting to use
# sshpass_prompt with an sshpass that won't let us pass -P, in which case we fail loudly.
elif return_tuple[0] in [1, 2, 3, 4, 6]:
msg = 'sshpass error:'
if no_log:
msg = '{0} <error censored due to no log>'.format(msg)
else:
details = to_native(return_tuple[2]).rstrip()
if "sshpass: invalid option -- 'P'" in details:
details = 'Installed sshpass version does not support customized password prompts. ' \
'Upgrade sshpass to use sshpass_prompt, or otherwise switch to ssh keys.'
raise AnsibleError('{0} {1}'.format(msg, details))
msg = '{0} {1}'.format(msg, details)
if return_tuple[0] == 255:
SSH_ERROR = True
for signature in b_NOT_SSH_ERRORS:
if signature in return_tuple[1]:
SSH_ERROR = False
break
if SSH_ERROR:
msg = "Failed to connect to the host via ssh:"
if no_log:
msg = '{0} <error censored due to no log>'.format(msg)
else:
msg = '{0} {1}'.format(msg, to_native(return_tuple[2]).rstrip())
raise AnsibleConnectionFailure(msg)
# For other errors, no exception is raised so the connection is retried and we only log the messages
if 1 <= return_tuple[0] <= 254:
msg = u"Failed to connect to the host via ssh:"
if no_log:
msg = u'{0} <error censored due to no log>'.format(msg)
else:
msg = u'{0} {1}'.format(msg, to_text(return_tuple[2]).rstrip())
display.vvv(msg, host=host)
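# For reference, the sshpass(1) exit statuses handled above are, per its manual:
# 1 invalid command line argument, 2 conflicting arguments, 3 general runtime
# error, 4 unrecognized response from ssh, 5 invalid/incorrect password and
# 6 host public key unknown; ssh itself uses 255 for connection-level failures.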
def _ssh_retry(func):
"""
Decorator to retry ssh/scp/sftp in the case of a connection failure
Will retry if:
* an exception is caught
* ssh returns 255
Will not retry if
* sshpass returns 5 (invalid password, to prevent account lockouts)
* remaining_tries is < 2
* retries limit reached
"""
@wraps(func)
def wrapped(self, *args, **kwargs):
remaining_tries = int(self.get_option('reconnection_retries')) + 1
cmd_summary = u"%s..." % to_text(args[0])
conn_password = self.get_option('password') or self._play_context.password
for attempt in range(remaining_tries):
cmd = args[0]
if attempt != 0 and conn_password and isinstance(cmd, list):
# If this is a retry, the fd/pipe for sshpass is closed, and we need a new one
self.sshpass_pipe = os.pipe()
cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')
try:
try:
return_tuple = func(self, *args, **kwargs)
# TODO: this should come from task
if self._play_context.no_log:
display.vvv(u'rc=%s, stdout and stderr censored due to no log' % return_tuple[0], host=self.host)
else:
display.vvv(return_tuple, host=self.host)
# 0 = success
# 1-254 = remote command return code
# 255 could be a failure from the ssh command itself
except (AnsibleControlPersistBrokenPipeError):
# Retry one more time because of the ControlPersist broken pipe (see #16731)
cmd = args[0]
if conn_password and isinstance(cmd, list):
# This is a retry, so the fd/pipe for sshpass is closed, and we need a new one
self.sshpass_pipe = os.pipe()
cmd[1] = b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')
display.vvv(u"RETRYING BECAUSE OF CONTROLPERSIST BROKEN PIPE")
return_tuple = func(self, *args, **kwargs)
remaining_retries = remaining_tries - attempt - 1
_handle_error(remaining_retries, cmd[0], return_tuple, self._play_context.no_log, self.host)
break
# 5 = Invalid/incorrect password from sshpass
except AnsibleAuthenticationFailure:
# Raising this exception, which is subclassed from AnsibleConnectionFailure, prevents further retries
raise
except (AnsibleConnectionFailure, Exception) as e:
if attempt == remaining_tries - 1:
raise
else:
pause = 2 ** attempt - 1
if pause > 30:
pause = 30
if isinstance(e, AnsibleConnectionFailure):
msg = u"ssh_retry: attempt: %d, ssh return code is 255. cmd (%s), pausing for %d seconds" % (attempt + 1, cmd_summary, pause)
else:
msg = (u"ssh_retry: attempt: %d, caught exception(%s) from cmd (%s), "
u"pausing for %d seconds" % (attempt + 1, to_text(e), cmd_summary, pause))
display.vv(msg, host=self.host)
time.sleep(pause)
continue
return return_tuple
return wrapped
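# Backoff sketch: for a failed attempt n (0-based) the wrapper above sleeps
# min(2 ** n - 1, 30) seconds before retrying, i.e.:
#
#   >>> [min(2 ** n - 1, 30) for n in range(7)]
#   [0, 1, 3, 7, 15, 30, 30]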
class Connection(ConnectionBase):
''' ssh based connections '''
transport = 'ssh'
has_pipelining = True
def __init__(self, *args, **kwargs):
super(Connection, self).__init__(*args, **kwargs)
# TODO: all should come from get_option(), but not might be set at this point yet
self.host = self._play_context.remote_addr
self.port = self._play_context.port
self.user = self._play_context.remote_user
self.control_path = None
self.control_path_dir = None
# Windows operates differently from a POSIX connection/shell plugin,
# we need to set various properties to ensure SSH on Windows continues
# to work
if getattr(self._shell, "_IS_WINDOWS", False):
self.has_native_async = True
self.always_pipeline_modules = True
self.module_implementation_preferences = ('.ps1', '.exe', '')
self.allow_executable = False
# The connection is created by running ssh/scp/sftp from the exec_command,
# put_file, and fetch_file methods, so we don't need to do any connection
# management here.
def _connect(self):
return self
@staticmethod
def _create_control_path(host, port, user, connection=None, pid=None):
'''Make a hash for the controlpath based on con attributes'''
pstring = '%s-%s-%s' % (host, port, user)
if connection:
pstring += '-%s' % connection
if pid:
pstring += '-%s' % to_text(pid)
m = hashlib.sha1()
m.update(to_bytes(pstring))
digest = m.hexdigest()
cpath = '%(directory)s/' + digest[:10]
return cpath
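    # Illustrative call (values assumed): _create_control_path('192.0.2.10', 22, 'deploy')
    # returns '%(directory)s/' plus the first ten hex digits of
    # sha1('192.0.2.10-22-deploy'); _build_command later substitutes the
    # control_path_dir setting for %(directory)s.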
@staticmethod
def _sshpass_available():
global SSHPASS_AVAILABLE
# We test once if sshpass is available, and remember the result. It
# would be nice to use distutils.spawn.find_executable for this, but
        # distutils isn't always available; shutil.which() is Python3-only.
if SSHPASS_AVAILABLE is None:
try:
p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
SSHPASS_AVAILABLE = True
except OSError:
SSHPASS_AVAILABLE = False
return SSHPASS_AVAILABLE
@staticmethod
def _persistence_controls(b_command):
'''
Takes a command array and scans it for ControlPersist and ControlPath
settings and returns two booleans indicating whether either was found.
This could be smarter, e.g. returning false if ControlPersist is 'no',
        but for now we do it the simple way.
'''
controlpersist = False
controlpath = False
for b_arg in (a.lower() for a in b_command):
if b'controlpersist' in b_arg:
controlpersist = True
elif b'controlpath' in b_arg:
controlpath = True
return controlpersist, controlpath
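    # For example (assumed argv), _persistence_controls([b'ssh', b'-o', b'ControlPersist=60s'])
    # returns (True, False), which makes _build_command append its own ControlPath below.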
def _add_args(self, b_command, b_args, explanation):
"""
Adds arguments to the ssh command and displays a caller-supplied explanation of why.
:arg b_command: A list containing the command to add the new arguments to.
This list will be modified by this method.
:arg b_args: An iterable of new arguments to add. This iterable is used
            more than once so it must be persistent (i.e. a list is okay but a
            StringIO would not be).
        :arg explanation: A text string explaining why the arguments
were added. It will be displayed with a high enough verbosity.
.. note:: This function does its work via side-effect. The b_command list has the new arguments appended.
"""
display.vvvvv(u'SSH: %s: (%s)' % (explanation, ')('.join(to_text(a) for a in b_args)), host=self.host)
b_command += b_args
def _build_command(self, binary, subsystem, *other_args):
'''
        Takes an executable (ssh, scp, sftp or wrapper) and optional extra arguments and returns the remote command
wrapped in local ssh shell commands and ready for execution.
:arg binary: actual executable to use to execute command.
:arg subsystem: type of executable provided, ssh/sftp/scp, needed because wrappers for ssh might have diff names.
        :arg other_args: any additional arguments to pass through to the ssh binary
'''
b_command = []
conn_password = self.get_option('password') or self._play_context.password
#
# First, the command to invoke
#
# If we want to use password authentication, we have to set up a pipe to
# write the password to sshpass.
pkcs11_provider = self.get_option("pkcs11_provider")
if conn_password or pkcs11_provider:
if not self._sshpass_available():
raise AnsibleError("to use the 'ssh' connection type with passwords or pkcs11_provider, you must install the sshpass program")
if not conn_password and pkcs11_provider:
raise AnsibleError("to use pkcs11_provider you must specify a password/pin")
self.sshpass_pipe = os.pipe()
b_command += [b'sshpass', b'-d' + to_bytes(self.sshpass_pipe[0], nonstring='simplerepr', errors='surrogate_or_strict')]
password_prompt = self.get_option('sshpass_prompt')
if not password_prompt and pkcs11_provider:
                # Set a default password prompt for pkcs11_provider to make it clear it's a PIN
password_prompt = 'Enter PIN for '
if password_prompt:
b_command += [b'-P', to_bytes(password_prompt, errors='surrogate_or_strict')]
b_command += [to_bytes(binary, errors='surrogate_or_strict')]
#
# Next, additional arguments based on the configuration.
#
# pkcs11 mode allows the use of Smartcards or Yubikey devices
if conn_password and pkcs11_provider:
self._add_args(b_command,
(b"-o", b"KbdInteractiveAuthentication=no",
b"-o", b"PreferredAuthentications=publickey",
b"-o", b"PasswordAuthentication=no",
b'-o', to_bytes(u'PKCS11Provider=%s' % pkcs11_provider)),
u'Enable pkcs11')
# sftp batch mode allows us to correctly catch failed transfers, but can
# be disabled if the client side doesn't support the option. However,
# sftp batch mode does not prompt for passwords so it must be disabled
# if not using controlpersist and using sshpass
if subsystem == 'sftp' and self.get_option('sftp_batch_mode'):
if conn_password:
b_args = [b'-o', b'BatchMode=no']
self._add_args(b_command, b_args, u'disable batch mode for sshpass')
b_command += [b'-b', b'-']
if self._play_context.verbosity > 3:
b_command.append(b'-vvv')
# Next, we add ssh_args
ssh_args = self.get_option('ssh_args')
if ssh_args:
b_args = [to_bytes(a, errors='surrogate_or_strict') for a in
self._split_ssh_args(ssh_args)]
self._add_args(b_command, b_args, u"ansible.cfg set ssh_args")
# Now we add various arguments that have their own specific settings defined in docs above.
if self.get_option('host_key_checking') is False:
b_args = (b"-o", b"StrictHostKeyChecking=no")
self._add_args(b_command, b_args, u"ANSIBLE_HOST_KEY_CHECKING/host_key_checking disabled")
self.port = self.get_option('port')
if self.port is not None:
b_args = (b"-o", b"Port=" + to_bytes(self.port, nonstring='simplerepr', errors='surrogate_or_strict'))
self._add_args(b_command, b_args, u"ANSIBLE_REMOTE_PORT/remote_port/ansible_port set")
key = self.get_option('private_key_file')
if key:
b_args = (b"-o", b'IdentityFile="' + to_bytes(os.path.expanduser(key), errors='surrogate_or_strict') + b'"')
self._add_args(b_command, b_args, u"ANSIBLE_PRIVATE_KEY_FILE/private_key_file/ansible_ssh_private_key_file set")
if not conn_password:
self._add_args(
b_command, (
b"-o", b"KbdInteractiveAuthentication=no",
b"-o", b"PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
b"-o", b"PasswordAuthentication=no"
),
u"ansible_password/ansible_ssh_password not set"
)
self.user = self.get_option('remote_user')
if self.user:
self._add_args(
b_command,
(b"-o", b'User="%s"' % to_bytes(self.user, errors='surrogate_or_strict')),
u"ANSIBLE_REMOTE_USER/remote_user/ansible_user/user/-u set"
)
timeout = self.get_option('timeout')
self._add_args(
b_command,
(b"-o", b"ConnectTimeout=" + to_bytes(timeout, errors='surrogate_or_strict', nonstring='simplerepr')),
u"ANSIBLE_TIMEOUT/timeout set"
)
# Add in any common or binary-specific arguments from the PlayContext
# (i.e. inventory or task settings or overrides on the command line).
for opt in (u'ssh_common_args', u'{0}_extra_args'.format(subsystem)):
attr = self.get_option(opt)
if attr is not None:
b_args = [to_bytes(a, errors='surrogate_or_strict') for a in self._split_ssh_args(attr)]
self._add_args(b_command, b_args, u"Set %s" % opt)
# Check if ControlPersist is enabled and add a ControlPath if one hasn't
# already been set.
controlpersist, controlpath = self._persistence_controls(b_command)
if controlpersist:
self._persistent = True
if not controlpath:
self.control_path_dir = self.get_option('control_path_dir')
cpdir = unfrackpath(self.control_path_dir)
b_cpdir = to_bytes(cpdir, errors='surrogate_or_strict')
# The directory must exist and be writable.
makedirs_safe(b_cpdir, 0o700)
if not os.access(b_cpdir, os.W_OK):
raise AnsibleError("Cannot write to ControlPath %s" % to_native(cpdir))
self.control_path = self.get_option('control_path')
if not self.control_path:
self.control_path = self._create_control_path(
self.host,
self.port,
self.user
)
b_args = (b"-o", b'ControlPath="%s"' % to_bytes(self.control_path % dict(directory=cpdir), errors='surrogate_or_strict'))
self._add_args(b_command, b_args, u"found only ControlPersist; added ControlPath")
# Finally, we add any caller-supplied extras.
if other_args:
b_command += [to_bytes(a) for a in other_args]
return b_command
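    # With password auth enabled this typically yields something shaped like
    # (illustrative): [b'sshpass', b'-d<fd>', b'ssh', b'-o', b'ControlMaster=auto',
    # ..., b'-o', b'ControlPath="~/.ansible/cp/<hash>"', b'<host>'], matching the
    # EXEC lines quoted in the bug report below.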
def _send_initial_data(self, fh, in_data, ssh_process):
'''
Writes initial data to the stdin filehandle of the subprocess and closes
it. (The handle must be closed; otherwise, for example, "sftp -b -" will
just hang forever waiting for more commands.)
'''
display.debug(u'Sending initial data')
try:
fh.write(to_bytes(in_data))
fh.close()
except (OSError, IOError) as e:
# The ssh connection may have already terminated at this point, with a more useful error
# Only raise AnsibleConnectionFailure if the ssh process is still alive
time.sleep(0.001)
ssh_process.poll()
if getattr(ssh_process, 'returncode', None) is None:
raise AnsibleConnectionFailure(
'Data could not be sent to remote host "%s". Make sure this host can be reached '
'over ssh: %s' % (self.host, to_native(e)), orig_exc=e
)
display.debug(u'Sent initial data (%d bytes)' % len(in_data))
# Used by _run() to kill processes on failures
@staticmethod
def _terminate_process(p):
""" Terminate a process, ignoring errors """
try:
p.terminate()
except (OSError, IOError):
pass
# This is separate from _run() because we need to do the same thing for stdout
# and stderr.
def _examine_output(self, source, state, b_chunk, sudoable):
'''
Takes a string, extracts complete lines from it, tests to see if they
are a prompt, error message, etc., and sets appropriate flags in self.
Prompt and success lines are removed.
Returns the processed (i.e. possibly-edited) output and the unprocessed
remainder (to be processed with the next chunk) as strings.
'''
output = []
for b_line in b_chunk.splitlines(True):
display_line = to_text(b_line).rstrip('\r\n')
suppress_output = False
# display.debug("Examining line (source=%s, state=%s): '%s'" % (source, state, display_line))
if self.become.expect_prompt() and self.become.check_password_prompt(b_line):
display.debug(u"become_prompt: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_prompt'] = True
suppress_output = True
elif self.become.success and self.become.check_success(b_line):
display.debug(u"become_success: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_success'] = True
suppress_output = True
elif sudoable and self.become.check_incorrect_password(b_line):
display.debug(u"become_error: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_error'] = True
elif sudoable and self.become.check_missing_password(b_line):
display.debug(u"become_nopasswd_error: (source=%s, state=%s): '%s'" % (source, state, display_line))
self._flags['become_nopasswd_error'] = True
if not suppress_output:
output.append(b_line)
# The chunk we read was most likely a series of complete lines, but just
# in case the last line was incomplete (and not a prompt, which we would
# have removed from the output), we retain it to be processed with the
# next chunk.
remainder = b''
if output and not output[-1].endswith(b'\n'):
remainder = output[-1]
output = output[:-1]
return b''.join(output), remainder
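    # Example (taken from this plugin's unit tests): with a become prompt matching
    # b'foo', _examine_output('stdout', 'awaiting_prompt',
    # b'line 1\nline 2\nfoo\nline 3\nthis should be the remainder', False) returns
    # (b'line 1\nline 2\nline 3\n', b'this should be the remainder') and sets
    # self._flags['become_prompt'] = True.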
def _bare_run(self, cmd, in_data, sudoable=True, checkrc=True):
'''
Starts the command and communicates with it until it ends.
'''
# We don't use _shell.quote as this is run on the controller and independent from the shell plugin chosen
display_cmd = u' '.join(shlex.quote(to_text(c)) for c in cmd)
display.vvv(u'SSH: EXEC {0}'.format(display_cmd), host=self.host)
# Start the given command. If we don't need to pipeline data, we can try
# to use a pseudo-tty (ssh will have been invoked with -tt). If we are
# pipelining data, or can't create a pty, we fall back to using plain
# old pipes.
p = None
if isinstance(cmd, (text_type, binary_type)):
cmd = to_bytes(cmd)
else:
cmd = list(map(to_bytes, cmd))
conn_password = self.get_option('password') or self._play_context.password
if not in_data:
try:
# Make sure stdin is a proper pty to avoid tcgetattr errors
master, slave = pty.openpty()
if PY3 and conn_password:
# pylint: disable=unexpected-keyword-arg
p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe)
else:
p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdin = os.fdopen(master, 'wb', 0)
os.close(slave)
except (OSError, IOError):
p = None
if not p:
try:
if PY3 and conn_password:
# pylint: disable=unexpected-keyword-arg
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, pass_fds=self.sshpass_pipe)
else:
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdin = p.stdin
except (OSError, IOError) as e:
raise AnsibleError('Unable to execute ssh command line on a controller due to: %s' % to_native(e))
# If we are using SSH password authentication, write the password into
# the pipe we opened in _build_command.
if conn_password:
os.close(self.sshpass_pipe[0])
try:
os.write(self.sshpass_pipe[1], to_bytes(conn_password) + b'\n')
except OSError as e:
# Ignore broken pipe errors if the sshpass process has exited.
if e.errno != errno.EPIPE or p.poll() is None:
raise
os.close(self.sshpass_pipe[1])
#
# SSH state machine
#
# Now we read and accumulate output from the running process until it
# exits. Depending on the circumstances, we may also need to write an
# escalation password and/or pipelined input to the process.
states = [
'awaiting_prompt', 'awaiting_escalation', 'ready_to_send', 'awaiting_exit'
]
# Are we requesting privilege escalation? Right now, we may be invoked
# to execute sftp/scp with sudoable=True, but we can request escalation
# only when using ssh. Otherwise we can send initial data straightaway.
state = states.index('ready_to_send')
if to_bytes(self.get_option('ssh_executable')) in cmd and sudoable:
prompt = getattr(self.become, 'prompt', None)
if prompt:
# We're requesting escalation with a password, so we have to
# wait for a password prompt.
state = states.index('awaiting_prompt')
display.debug(u'Initial state: %s: %s' % (states[state], to_text(prompt)))
elif self.become and self.become.success:
# We're requesting escalation without a password, so we have to
# detect success/failure before sending any initial data.
state = states.index('awaiting_escalation')
display.debug(u'Initial state: %s: %s' % (states[state], to_text(self.become.success)))
# We store accumulated stdout and stderr output from the process here,
# but strip any privilege escalation prompt/confirmation lines first.
# Output is accumulated into tmp_*, complete lines are extracted into
# an array, then checked and removed or copied to stdout or stderr. We
# set any flags based on examining the output in self._flags.
b_stdout = b_stderr = b''
b_tmp_stdout = b_tmp_stderr = b''
self._flags = dict(
become_prompt=False, become_success=False,
become_error=False, become_nopasswd_error=False
)
# select timeout should be longer than the connect timeout, otherwise
# they will race each other when we can't connect, and the connect
# timeout usually fails
timeout = 2 + self.get_option('timeout')
for fd in (p.stdout, p.stderr):
fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
        # TODO: bcoca would like to use SelectSelector() when few filehandles are open;
        # select is faster when the filehandle count is low and we only ever handle 1.
selector = selectors.DefaultSelector()
selector.register(p.stdout, selectors.EVENT_READ)
selector.register(p.stderr, selectors.EVENT_READ)
# If we can send initial data without waiting for anything, we do so
# before we start polling
if states[state] == 'ready_to_send' and in_data:
self._send_initial_data(stdin, in_data, p)
state += 1
try:
while True:
poll = p.poll()
events = selector.select(timeout)
# We pay attention to timeouts only while negotiating a prompt.
if not events:
# We timed out
if state <= states.index('awaiting_escalation'):
# If the process has already exited, then it's not really a
# timeout; we'll let the normal error handling deal with it.
if poll is not None:
break
self._terminate_process(p)
raise AnsibleError('Timeout (%ds) waiting for privilege escalation prompt: %s' % (timeout, to_native(b_stdout)))
# Read whatever output is available on stdout and stderr, and stop
# listening to the pipe if it's been closed.
for key, event in events:
if key.fileobj == p.stdout:
b_chunk = p.stdout.read()
if b_chunk == b'':
# stdout has been closed, stop watching it
selector.unregister(p.stdout)
# When ssh has ControlMaster (+ControlPath/Persist) enabled, the
# first connection goes into the background and we never see EOF
# on stderr. If we see EOF on stdout, lower the select timeout
# to reduce the time wasted selecting on stderr if we observe
                            # that the process has not yet exited after this EOF. Otherwise
# we may spend a long timeout period waiting for an EOF that is
# not going to arrive until the persisted connection closes.
timeout = 1
b_tmp_stdout += b_chunk
display.debug(u"stdout chunk (state=%s):\n>>>%s<<<\n" % (state, to_text(b_chunk)))
elif key.fileobj == p.stderr:
b_chunk = p.stderr.read()
if b_chunk == b'':
# stderr has been closed, stop watching it
selector.unregister(p.stderr)
b_tmp_stderr += b_chunk
display.debug("stderr chunk (state=%s):\n>>>%s<<<\n" % (state, to_text(b_chunk)))
# We examine the output line-by-line until we have negotiated any
# privilege escalation prompt and subsequent success/error message.
# Afterwards, we can accumulate output without looking at it.
if state < states.index('ready_to_send'):
if b_tmp_stdout:
b_output, b_unprocessed = self._examine_output('stdout', states[state], b_tmp_stdout, sudoable)
b_stdout += b_output
b_tmp_stdout = b_unprocessed
if b_tmp_stderr:
b_output, b_unprocessed = self._examine_output('stderr', states[state], b_tmp_stderr, sudoable)
b_stderr += b_output
b_tmp_stderr = b_unprocessed
else:
b_stdout += b_tmp_stdout
b_stderr += b_tmp_stderr
b_tmp_stdout = b_tmp_stderr = b''
# If we see a privilege escalation prompt, we send the password.
# (If we're expecting a prompt but the escalation succeeds, we
# didn't need the password and can carry on regardless.)
if states[state] == 'awaiting_prompt':
if self._flags['become_prompt']:
display.debug(u'Sending become_password in response to prompt')
become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
# On python3 stdin is a BufferedWriter, and we don't have a guarantee
# that the write will happen without a flush
stdin.flush()
self._flags['become_prompt'] = False
state += 1
elif self._flags['become_success']:
state += 1
# We've requested escalation (with or without a password), now we
# wait for an error message or a successful escalation.
if states[state] == 'awaiting_escalation':
if self._flags['become_success']:
display.vvv(u'Escalation succeeded')
self._flags['become_success'] = False
state += 1
elif self._flags['become_error']:
display.vvv(u'Escalation failed')
self._terminate_process(p)
self._flags['become_error'] = False
raise AnsibleError('Incorrect %s password' % self.become.name)
elif self._flags['become_nopasswd_error']:
display.vvv(u'Escalation requires password')
self._terminate_process(p)
self._flags['become_nopasswd_error'] = False
raise AnsibleError('Missing %s password' % self.become.name)
elif self._flags['become_prompt']:
# This shouldn't happen, because we should see the "Sorry,
# try again" message first.
display.vvv(u'Escalation prompt repeated')
self._terminate_process(p)
self._flags['become_prompt'] = False
raise AnsibleError('Incorrect %s password' % self.become.name)
# Once we're sure that the privilege escalation prompt, if any, has
# been dealt with, we can send any initial data and start waiting
# for output.
if states[state] == 'ready_to_send':
if in_data:
self._send_initial_data(stdin, in_data, p)
state += 1
# Now we're awaiting_exit: has the child process exited? If it has,
# and we've read all available output from it, we're done.
if poll is not None:
if not selector.get_map() or not events:
break
# We should not see further writes to the stdout/stderr file
# descriptors after the process has closed, set the select
# timeout to gather any last writes we may have missed.
timeout = 0
continue
# If the process has not yet exited, but we've already read EOF from
# its stdout and stderr (and thus no longer watching any file
# descriptors), we can just wait for it to exit.
elif not selector.get_map():
p.wait()
break
# Otherwise there may still be outstanding data to read.
finally:
selector.close()
# close stdin, stdout, and stderr after process is terminated and
# stdout/stderr are read completely (see also issues #848, #64768).
stdin.close()
p.stdout.close()
p.stderr.close()
if self.get_option('host_key_checking'):
if cmd[0] == b"sshpass" and p.returncode == 6:
                raise AnsibleError('Using an SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support '
'this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
controlpersisterror = b'Bad configuration option: ControlPersist' in b_stderr or b'unknown configuration option: ControlPersist' in b_stderr
if p.returncode != 0 and controlpersisterror:
raise AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" '
'(or ssh_args in [ssh_connection] section of the config file) before running again')
# If we find a broken pipe because of ControlPersist timeout expiring (see #16731),
# we raise a special exception so that we can retry a connection.
controlpersist_broken_pipe = b'mux_client_hello_exchange: write packet: Broken pipe' in b_stderr
if p.returncode == 255:
additional = to_native(b_stderr)
if controlpersist_broken_pipe:
raise AnsibleControlPersistBrokenPipeError('Data could not be sent because of ControlPersist broken pipe: %s' % additional)
elif in_data and checkrc:
raise AnsibleConnectionFailure('Data could not be sent to remote host "%s". Make sure this host can be reached over ssh: %s'
% (self.host, additional))
return (p.returncode, b_stdout, b_stderr)
@_ssh_retry
def _run(self, cmd, in_data, sudoable=True, checkrc=True):
"""Wrapper around _bare_run that retries the connection
"""
return self._bare_run(cmd, in_data, sudoable=sudoable, checkrc=checkrc)
@_ssh_retry
def _file_transport_command(self, in_path, out_path, sftp_action):
# scp and sftp require square brackets for IPv6 addresses, but
# accept them for hostnames and IPv4 addresses too.
host = '[%s]' % self.host
smart_methods = ['sftp', 'scp', 'piped']
# Windows does not support dd so we cannot use the piped method
if getattr(self._shell, "_IS_WINDOWS", False):
smart_methods.remove('piped')
# Transfer methods to try
methods = []
# Use the transfer_method option if set, otherwise use scp_if_ssh
ssh_transfer_method = self.get_option('ssh_transfer_method')
scp_if_ssh = self.get_option('scp_if_ssh')
if ssh_transfer_method is None and scp_if_ssh == 'smart':
ssh_transfer_method = 'smart'
if ssh_transfer_method is not None:
if ssh_transfer_method == 'smart':
methods = smart_methods
else:
methods = [ssh_transfer_method]
else:
# since this can be a non-bool now, we need to handle it correctly
if not isinstance(scp_if_ssh, bool):
scp_if_ssh = scp_if_ssh.lower()
if scp_if_ssh in BOOLEANS:
scp_if_ssh = boolean(scp_if_ssh, strict=False)
elif scp_if_ssh != 'smart':
raise AnsibleOptionsError('scp_if_ssh needs to be one of [smart|True|False]')
if scp_if_ssh == 'smart':
methods = smart_methods
elif scp_if_ssh is True:
methods = ['scp']
else:
methods = ['sftp']
for method in methods:
returncode = stdout = stderr = None
if method == 'sftp':
cmd = self._build_command(self.get_option('sftp_executable'), 'sftp', to_bytes(host))
in_data = u"{0} {1} {2}\n".format(sftp_action, shlex.quote(in_path), shlex.quote(out_path))
in_data = to_bytes(in_data, nonstring='passthru')
(returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
elif method == 'scp':
scp = self.get_option('scp_executable')
if sftp_action == 'get':
cmd = self._build_command(scp, 'scp', u'{0}:{1}'.format(host, self._shell.quote(in_path)), out_path)
else:
cmd = self._build_command(scp, 'scp', in_path, u'{0}:{1}'.format(host, self._shell.quote(out_path)))
in_data = None
(returncode, stdout, stderr) = self._bare_run(cmd, in_data, checkrc=False)
elif method == 'piped':
if sftp_action == 'get':
# we pass sudoable=False to disable pty allocation, which
# would end up mixing stdout/stderr and screwing with newlines
(returncode, stdout, stderr) = self.exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE), sudoable=False)
with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
out_file.write(stdout)
else:
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as f:
in_data = to_bytes(f.read(), nonstring='passthru')
if not in_data:
count = ' count=0'
else:
count = ''
(returncode, stdout, stderr) = self.exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), in_data=in_data, sudoable=False)
# Check the return code and rollover to next method if failed
if returncode == 0:
return (returncode, stdout, stderr)
else:
# If not in smart mode, the data will be printed by the raise below
if len(methods) > 1:
display.warning(u'%s transfer mechanism failed on %s. Use ANSIBLE_DEBUG=1 to see detailed information' % (method, host))
display.debug(u'%s' % to_text(stdout))
display.debug(u'%s' % to_text(stderr))
if returncode == 255:
raise AnsibleConnectionFailure("Failed to connect to the host via %s: %s" % (method, to_native(stderr)))
else:
raise AnsibleError("failed to transfer file to %s %s:\n%s\n%s" %
(to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
def _escape_win_path(self, path):
""" converts a Windows path to one that's supported by SFTP and SCP """
# If using a root path then we need to start with /
prefix = ""
if re.match(r'^\w{1}:', path):
prefix = "/"
# Convert all '\' to '/'
return "%s%s" % (prefix, path.replace("\\", "/"))
#
# Main public methods
#
def exec_command(self, cmd, in_data=None, sudoable=True):
''' run a command on the remote host '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
display.vvv(u"ESTABLISH SSH CONNECTION FOR USER: {0}".format(self.user), host=self.host)
if getattr(self._shell, "_IS_WINDOWS", False):
# Become method 'runas' is done in the wrapper that is executed,
# need to disable sudoable so the bare_run is not waiting for a
# prompt that will not occur
sudoable = False
# Make sure our first command is to set the console encoding to
# utf-8, this must be done via chcp to get utf-8 (65001)
cmd_parts = ["chcp.com", "65001", self._shell._SHELL_REDIRECT_ALLNULL, self._shell._SHELL_AND]
cmd_parts.extend(self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False))
cmd = ' '.join(cmd_parts)
# we can only use tty when we are not pipelining the modules. piping
# data into /usr/bin/python inside a tty automatically invokes the
# python interactive-mode but the modules are not compatible with the
# interactive-mode ("unexpected indent" mainly because of empty lines)
ssh_executable = self.get_option('ssh_executable')
# -tt can cause various issues in some environments so allow the user
# to disable it as a troubleshooting method.
use_tty = self.get_option('use_tty')
if not in_data and sudoable and use_tty:
args = ('-tt', self.host, cmd)
else:
args = (self.host, cmd)
cmd = self._build_command(ssh_executable, 'ssh', *args)
(returncode, stdout, stderr) = self._run(cmd, in_data, sudoable=sudoable)
# When running on Windows, stderr may contain CLIXML encoded output
if getattr(self._shell, "_IS_WINDOWS", False) and stderr.startswith(b"#< CLIXML"):
stderr = _parse_clixml(stderr)
return (returncode, stdout, stderr)
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
super(Connection, self).put_file(in_path, out_path)
display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.host)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_native(in_path)))
if getattr(self._shell, "_IS_WINDOWS", False):
out_path = self._escape_win_path(out_path)
return self._file_transport_command(in_path, out_path, 'put')
def fetch_file(self, in_path, out_path):
''' fetch a file from remote to local '''
super(Connection, self).fetch_file(in_path, out_path)
display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
# need to add / if path is rooted
if getattr(self._shell, "_IS_WINDOWS", False):
in_path = self._escape_win_path(in_path)
return self._file_transport_command(in_path, out_path, 'get')
def reset(self):
run_reset = False
# If we have a persistent ssh connection (ControlPersist), we can ask it to stop listening.
# only run the reset if the ControlPath already exists or if it isn't configured and ControlPersist is set
# 'check' will determine this.
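        # 'ssh -O check' exits 0 only when a ControlMaster process is listening on
        # the ControlPath; any non-zero status means there is no multiplexed
        # connection to stop, so the reset can be skipped.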
cmd = self._build_command(self.get_option('ssh_executable'), 'ssh', '-O', 'check', self.host)
display.vvv(u'sending connection check: %s' % to_text(cmd))
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
status_code = p.wait()
if status_code != 0:
display.vvv(u"No connection to reset: %s" % to_text(stderr))
else:
run_reset = True
if run_reset:
cmd = self._build_command(self.get_option('ssh_executable'), 'ssh', '-O', 'stop', self.host)
display.vvv(u'sending connection stop: %s' % to_text(cmd))
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
status_code = p.wait()
if status_code != 0:
display.warning(u"Failed to reset connection:%s" % to_text(stderr))
self.close()
def close(self):
self._connected = False
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 74,417 |
reboot fails after a long time with Ubuntu when verbosity set to connection debug
|
### Summary
When I reboot an Ubuntu 18.04 or 20.04 remote host with verbosity set to connection debug (-vvvv), the reboot fails after a long time (several hours, far longer than reboot_timeout). If I set verbosity to debug (-vvv) it works as expected.
Tested with a CentOS 7 remote host, which is not affected by this bug.
This bug can be reproduced on the devel branch but was discovered on version 2.10.
### Issue Type
Bug Report
### Component Name
reboot
### Ansible Version
```console
$ ansible --version
ansible 2.10.7
```
### Configuration
```console
$ ansible-config dump --only-changed
ANSIBLE_SSH_ARGS(ansible.cfg) = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null
HOST_KEY_CHECKING(ansible.cfg) = False
```
### OS / Environment
Control Node: Ubuntu 20.04
Remote node: Ubuntu 20.04 and 18.04
### Steps to Reproduce
```yaml
- name: "Dumb playbook"
hosts:
- My-Remote-Host
gather_facts: false
tasks:
- name: Reboot
reboot:
become: True
become_method: sudo
```
### Expected Results
```
<X.X.X.X> SSH: EXEC sshpass -d12 ssh -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o Port=22 -o 'User="host-user"' -o ConnectTimeout=10 -o ControlPath=~/.ansible/cp/02ec46349a -tt X.X.X.X '/bin/sh -c '"'"'sudo -H -S -p "[sudo via ansible, key=tzyslkziuuaepfcbrlaamnnnwqdftlts] password:" -u root /bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-tzyslkziuuaepfcbrlaamnnnwqdftlts ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
<X.X.X.X> (0, b'\r\n9eba9e53-7bd2-4178-8d58-65f090990e83\r\n', b"Warning: Permanently added 'X.X.X.X' (ECDSA) to the list of known hosts.\r\nShared connection to X.X.X.X closed.\r\n")
reboot: attempting post-reboot test command
<X.X.X.X> ESTABLISH SSH CONNECTION FOR USER: host-user
<X.X.X.X> SSH: EXEC sshpass -d12 ssh -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o Port=22 -o 'User="host-user"' -o ConnectTimeout=10 -o ControlPath=~/.ansible/cp/02ec46349a -tt X.X.X.X '/bin/sh -c '"'"'sudo -H -S -p "[sudo via ansible, key=mqdzbdqgyzzzqykxxumonzhvcptnfnqa] password:" -u root /bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-mqdzbdqgyzzzqykxxumonzhvcptnfnqa ; whoami'"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
<X.X.X.X> (0, b'\r\nroot\r\n', b'Shared connection to X.X.X.X closed.\r\n')
reboot: system successfully rebooted
<X.X.X.X> ESTABLISH SSH CONNECTION FOR USER: host-user
<X.X.X.X> SSH: EXEC sshpass -d12 ssh -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o Port=22 -o 'User="host-user"' -o ConnectTimeout=10 -o ControlPath=~/.ansible/cp/02ec46349a X.X.X.X '/bin/sh -c '"'"'rm -f -r /home/host-user/.ansible/tmp/ansible-tmp-1619445760.4098256-1763116-260472738004687/ > /dev/null 2>&1 && sleep 0'"'"''
<X.X.X.X> (0, b'', b'')
changed: [My-Remote-Host] => {
"changed": true,
"elapsed": 21,
"rebooted": true
}
META: ran handlers
META: ran handlers
```
### Actual Results
```console
<X.X.X.X> SSH: EXEC sshpass -d10 ssh -vvv -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o Port=22 -o 'User="host-user"' -o ConnectTimeout=10 -o ControlPath=~/.ansible/cp/02ec46349a -tt X.X.X.X '/bin/sh -c '"'"'sudo -H -S -p "[sudo via ansible, key=ptpwkfkjmbkbufkguzcinvxkirsrvysf] password:" -u root /bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-ptpwkfkjmbkbufkguzcinvxkirsrvysf ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"' && sleep 0'"'"''
<X.X.X.X> (255, b'', b'OpenSSH_8.2p1 Ubuntu-4ubuntu0.1, OpenSSL 1.1.1f 31 Mar 2020\r\ndebug1: Reading configuration data /etc/ssh/ssh_config\r\ndebug1: /etc/ssh/ssh_config line 19: include /etc/ssh/ssh_config.d/*.conf matched no files\r\ndebug1: /etc/ssh/ssh_config line 21: Applying options for *\r\ndebug2: resolve_canonicalize: hostname X.X.X.X is address\r\ndebug1: auto-mux: Trying existing master\r\ndebug1: Control socket "~/.ansible/cp/02ec46349a" does not exist\r\ndebug2: ssh_connect_direct\r\ndebug1: Connecting to X.X.X.X [X.X.X.X] port 22.\r\ndebug2: fd 3 setting O_NONBLOCK\r\ndebug1: connect to address X.X.X.X port 22: Connection refused\r\nssh: connect to host X.X.X.X port 22: Connection refused\r\n')
sending connection check: [b'sshpass', b'-d10', b'ssh', b'-vvv', b'-o', b'ControlMaster=auto', b'-o', b'ControlPersist=60s', b'-o', b'UserKnownHostsFile=/dev/null', b'-o', b'StrictHostKeyChecking=no', b'-o', b'Port=22', b'-o', b'User="host-user"', b'-o', b'ConnectTimeout=10', b'-o', b'ControlPath=~/.ansible/cp/02ec46349a', b'-O', b'check', b'X.X.X.X']
No connection to reset: OpenSSH_8.2p1 Ubuntu-4ubuntu0.1, OpenSSL 1.1.1f 31 Mar 2020
debug1: Reading configuration data /etc/ssh/ssh_config
debug1: /etc/ssh/ssh_config line 19: include /etc/ssh/ssh_config.d/*.conf matched no files
debug1: /etc/ssh/ssh_config line 21: Applying options for *
debug2: resolve_canonicalize: hostname X.X.X.X is address
debug1: auto-mux: Trying existing master
Control socket connect(~/.ansible/cp/02ec46349a): No such file or directory
reboot: attempting to get system boot time
<X.X.X.X> ESTABLISH SSH CONNECTION FOR USER: host-user
<X.X.X.X> SSH: EXEC sshpass -d12 ssh -vvv -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o Port=22 -o 'User="host-user"' -o ConnectTimeout=10 -o ControlPath=~/.ansible/cp/02ec46349a -tt X.X.X.X '/bin/sh -c '"'"'sudo -H -S -p "[sudo via ansible, key=bpysdtsdgvijqvsmtxxbnykpmxgrhqho] password:" -u root /bin/sh -c '"'"'"'"'"'"'"'"'echo BECOME-SUCCESS-bpysdtsdgvijqvsmtxxbnykpmxgrhqho ; cat /proc/sys/kernel/random/boot_id'"'"'"'"'"'"'"'"' && sleep 0'"'"''
Escalation succeeded
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/74417
|
https://github.com/ansible/ansible/pull/76732
|
9142be2f6cabbe6597c9254c5bb9186d17036d55
|
0ff80a15ba40c2aff3b96c1152f19c97a92d3c97
| 2021-04-26T14:23:59Z |
python
| 2022-01-13T21:28:09Z |
test/units/plugins/connection/test_ssh.py
|
# -*- coding: utf-8 -*-
# (c) 2015, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from io import StringIO
import pytest
from ansible import constants as C
from ansible.errors import AnsibleAuthenticationFailure
from units.compat import unittest
from units.compat.mock import patch, MagicMock, PropertyMock
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.module_utils.compat.selectors import SelectorKey, EVENT_READ
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes
from ansible.playbook.play_context import PlayContext
from ansible.plugins.connection import ssh
from ansible.plugins.loader import connection_loader, become_loader
class TestConnectionBaseClass(unittest.TestCase):
def test_plugins_connection_ssh_module(self):
play_context = PlayContext()
play_context.prompt = (
'[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: '
)
in_stream = StringIO()
self.assertIsInstance(ssh.Connection(play_context, in_stream), ssh.Connection)
def test_plugins_connection_ssh_basic(self):
pc = PlayContext()
new_stdin = StringIO()
conn = ssh.Connection(pc, new_stdin)
# connect just returns self, so assert that
res = conn._connect()
self.assertEqual(conn, res)
ssh.SSHPASS_AVAILABLE = False
self.assertFalse(conn._sshpass_available())
ssh.SSHPASS_AVAILABLE = True
self.assertTrue(conn._sshpass_available())
with patch('subprocess.Popen') as p:
ssh.SSHPASS_AVAILABLE = None
p.return_value = MagicMock()
self.assertTrue(conn._sshpass_available())
ssh.SSHPASS_AVAILABLE = None
p.return_value = None
p.side_effect = OSError()
self.assertFalse(conn._sshpass_available())
conn.close()
self.assertFalse(conn._connected)
def test_plugins_connection_ssh__build_command(self):
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('ssh', pc, new_stdin)
conn.get_option = MagicMock()
conn.get_option.return_value = ""
conn._build_command('ssh', 'ssh')
def test_plugins_connection_ssh_exec_command(self):
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('ssh', pc, new_stdin)
conn._build_command = MagicMock()
conn._build_command.return_value = 'ssh something something'
conn._run = MagicMock()
conn._run.return_value = (0, 'stdout', 'stderr')
conn.get_option = MagicMock()
conn.get_option.return_value = True
res, stdout, stderr = conn.exec_command('ssh')
res, stdout, stderr = conn.exec_command('ssh', 'this is some data')
def test_plugins_connection_ssh__examine_output(self):
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('ssh', pc, new_stdin)
conn.set_become_plugin(become_loader.get('sudo'))
conn.become.check_password_prompt = MagicMock()
conn.become.check_success = MagicMock()
conn.become.check_incorrect_password = MagicMock()
conn.become.check_missing_password = MagicMock()
def _check_password_prompt(line):
if b'foo' in line:
return True
return False
def _check_become_success(line):
if b'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz' in line:
return True
return False
def _check_incorrect_password(line):
if b'incorrect password' in line:
return True
return False
def _check_missing_password(line):
if b'bad password' in line:
return True
return False
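        # The four helpers above stand in for the become plugin's prompt,
        # success, incorrect-password and missing-password detectors; they
        # are wired in below as MagicMock side_effects so _examine_output
        # sees realistic match results.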
# test examining output for prompt
conn._flags = dict(
become_prompt=False,
become_success=False,
become_error=False,
become_nopasswd_error=False,
)
pc.prompt = True
# override become plugin
conn.become.prompt = True
conn.become.check_password_prompt = MagicMock(side_effect=_check_password_prompt)
conn.become.check_success = MagicMock(side_effect=_check_become_success)
conn.become.check_incorrect_password = MagicMock(side_effect=_check_incorrect_password)
conn.become.check_missing_password = MagicMock(side_effect=_check_missing_password)
def get_option(option):
if option == 'become_pass':
return 'password'
return None
conn.become.get_option = get_option
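        # _examine_output returns (filtered_output, unprocessed_tail): the
        # matched prompt line is stripped from the displayable output and
        # the trailing partial chunk is handed back for the next read.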
output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nfoo\nline 3\nthis should be the remainder', False)
self.assertEqual(output, b'line 1\nline 2\nline 3\n')
self.assertEqual(unprocessed, b'this should be the remainder')
self.assertTrue(conn._flags['become_prompt'])
self.assertFalse(conn._flags['become_success'])
self.assertFalse(conn._flags['become_error'])
self.assertFalse(conn._flags['become_nopasswd_error'])
# test examining output for become prompt
conn._flags = dict(
become_prompt=False,
become_success=False,
become_error=False,
become_nopasswd_error=False,
)
pc.prompt = False
conn.become.prompt = False
pc.success_key = u'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz'
conn.become.success = u'BECOME-SUCCESS-abcdefghijklmnopqrstuvxyz'
output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nBECOME-SUCCESS-abcdefghijklmnopqrstuvxyz\nline 3\n', False)
self.assertEqual(output, b'line 1\nline 2\nline 3\n')
self.assertEqual(unprocessed, b'')
self.assertFalse(conn._flags['become_prompt'])
self.assertTrue(conn._flags['become_success'])
self.assertFalse(conn._flags['become_error'])
self.assertFalse(conn._flags['become_nopasswd_error'])
# test examining output for become failure
conn._flags = dict(
become_prompt=False,
become_success=False,
become_error=False,
become_nopasswd_error=False,
)
pc.prompt = False
conn.become.prompt = False
pc.success_key = None
output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nline 2\nincorrect password\n', True)
self.assertEqual(output, b'line 1\nline 2\nincorrect password\n')
self.assertEqual(unprocessed, b'')
self.assertFalse(conn._flags['become_prompt'])
self.assertFalse(conn._flags['become_success'])
self.assertTrue(conn._flags['become_error'])
self.assertFalse(conn._flags['become_nopasswd_error'])
# test examining output for missing password
conn._flags = dict(
become_prompt=False,
become_success=False,
become_error=False,
become_nopasswd_error=False,
)
pc.prompt = False
conn.become.prompt = False
pc.success_key = None
output, unprocessed = conn._examine_output(u'source', u'state', b'line 1\nbad password\n', True)
self.assertEqual(output, b'line 1\nbad password\n')
self.assertEqual(unprocessed, b'')
self.assertFalse(conn._flags['become_prompt'])
self.assertFalse(conn._flags['become_success'])
self.assertFalse(conn._flags['become_error'])
self.assertTrue(conn._flags['become_nopasswd_error'])
@patch('time.sleep')
@patch('os.path.exists')
def test_plugins_connection_ssh_put_file(self, mock_ospe, mock_sleep):
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('ssh', pc, new_stdin)
conn._build_command = MagicMock()
conn._bare_run = MagicMock()
mock_ospe.return_value = True
conn._build_command.return_value = 'some command to run'
conn._bare_run.return_value = (0, '', '')
conn.host = "some_host"
conn.set_option('reconnection_retries', 9)
        conn.set_option('ssh_transfer_method', None)  # scp_if_ssh is ignored unless this is set to None
# Test with SCP_IF_SSH set to smart
# Test when SFTP works
conn.set_option('scp_if_ssh', 'smart')
expected_in_data = b' '.join((b'put', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n'
conn.put_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
# Test when SFTP doesn't work but SCP does
conn._bare_run.side_effect = [(1, 'stdout', 'some errors'), (0, '', '')]
conn.put_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
conn._bare_run.side_effect = None
# test with SCP_IF_SSH enabled
conn.set_option('scp_if_ssh', True)
conn.put_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
conn.put_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
        # test with SCP_IF_SSH disabled
conn.set_option('scp_if_ssh', False)
expected_in_data = b' '.join((b'put', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n'
conn.put_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
expected_in_data = b' '.join((b'put',
to_bytes(shlex_quote('/path/to/in/file/with/unicode-fö〩')),
to_bytes(shlex_quote('/path/to/dest/file/with/unicode-fö〩')))) + b'\n'
conn.put_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
# test that a non-zero rc raises an error
conn._bare_run.return_value = (1, 'stdout', 'some errors')
self.assertRaises(AnsibleError, conn.put_file, '/path/to/bad/file', '/remote/path/to/file')
# test that a not-found path raises an error
mock_ospe.return_value = False
conn._bare_run.return_value = (0, 'stdout', '')
self.assertRaises(AnsibleFileNotFound, conn.put_file, '/path/to/bad/file', '/remote/path/to/file')
@patch('time.sleep')
def test_plugins_connection_ssh_fetch_file(self, mock_sleep):
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('ssh', pc, new_stdin)
conn._build_command = MagicMock()
conn._bare_run = MagicMock()
conn._load_name = 'ssh'
conn._build_command.return_value = 'some command to run'
conn._bare_run.return_value = (0, '', '')
conn.host = "some_host"
conn.set_option('reconnection_retries', 9)
        conn.set_option('ssh_transfer_method', None)  # scp_if_ssh is ignored unless this is set to None
# Test with SCP_IF_SSH set to smart
# Test when SFTP works
conn.set_option('scp_if_ssh', 'smart')
expected_in_data = b' '.join((b'get', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n'
conn.set_options({})
conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
# Test when SFTP doesn't work but SCP does
conn._bare_run.side_effect = [(1, 'stdout', 'some errors'), (0, '', '')]
conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
# test with SCP_IF_SSH enabled
conn._bare_run.side_effect = None
        conn.set_option('ssh_transfer_method', None)  # scp_if_ssh is ignored unless this is set to None
conn.set_option('scp_if_ssh', 'True')
conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
conn.fetch_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩')
conn._bare_run.assert_called_with('some command to run', None, checkrc=False)
# test with SCP_IF_SSH disabled
conn.set_option('scp_if_ssh', False)
expected_in_data = b' '.join((b'get', to_bytes(shlex_quote('/path/to/in/file')), to_bytes(shlex_quote('/path/to/dest/file')))) + b'\n'
conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
expected_in_data = b' '.join((b'get',
to_bytes(shlex_quote('/path/to/in/file/with/unicode-fö〩')),
to_bytes(shlex_quote('/path/to/dest/file/with/unicode-fö〩')))) + b'\n'
conn.fetch_file(u'/path/to/in/file/with/unicode-fö〩', u'/path/to/dest/file/with/unicode-fö〩')
conn._bare_run.assert_called_with('some command to run', expected_in_data, checkrc=False)
# test that a non-zero rc raises an error
conn._bare_run.return_value = (1, 'stdout', 'some errors')
self.assertRaises(AnsibleError, conn.fetch_file, '/path/to/bad/file', '/remote/path/to/file')
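# MockSelector fakes just enough of selectors.BaseSelector for these tests:
# register()/unregister() maintain a watched-file count and get_map()
# reports it, which is what drives the read loop in Connection._run().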
class MockSelector(object):
def __init__(self):
self.files_watched = 0
self.register = MagicMock(side_effect=self._register)
self.unregister = MagicMock(side_effect=self._unregister)
self.close = MagicMock()
self.get_map = MagicMock(side_effect=self._get_map)
self.select = MagicMock()
def _register(self, *args, **kwargs):
self.files_watched += 1
def _unregister(self, *args, **kwargs):
self.files_watched -= 1
def _get_map(self, *args, **kwargs):
return self.files_watched
@pytest.fixture
def mock_run_env(request, mocker):
pc = PlayContext()
new_stdin = StringIO()
conn = connection_loader.get('ssh', pc, new_stdin)
conn.set_become_plugin(become_loader.get('sudo'))
conn._send_initial_data = MagicMock()
conn._examine_output = MagicMock()
conn._terminate_process = MagicMock()
conn._load_name = 'ssh'
conn.sshpass_pipe = [MagicMock(), MagicMock()]
request.cls.pc = pc
request.cls.conn = conn
mock_popen_res = MagicMock()
mock_popen_res.poll = MagicMock()
mock_popen_res.wait = MagicMock()
mock_popen_res.stdin = MagicMock()
mock_popen_res.stdin.fileno.return_value = 1000
mock_popen_res.stdout = MagicMock()
mock_popen_res.stdout.fileno.return_value = 1001
mock_popen_res.stderr = MagicMock()
mock_popen_res.stderr.fileno.return_value = 1002
mock_popen_res.returncode = 0
request.cls.mock_popen_res = mock_popen_res
mock_popen = mocker.patch('subprocess.Popen', return_value=mock_popen_res)
request.cls.mock_popen = mock_popen
request.cls.mock_selector = MockSelector()
mocker.patch('ansible.module_utils.compat.selectors.DefaultSelector', lambda: request.cls.mock_selector)
request.cls.mock_openpty = mocker.patch('pty.openpty')
mocker.patch('fcntl.fcntl')
mocker.patch('os.write')
mocker.patch('os.close')
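# The fixture attaches its mocks to request.cls, so the usefixtures-marked
# classes below can reach them as self.conn, self.mock_popen,
# self.mock_popen_res and self.mock_selector without a shared base class.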
@pytest.mark.usefixtures('mock_run_env')
class TestSSHConnectionRun(object):
# FIXME:
    # These tests are little more than a smoke test. Need to enhance them
    # a bit to check that they're calling the relevant functions and
    # covering the code paths completely.
def test_no_escalation(self):
self.mock_popen_res.stdout.read.side_effect = [b"my_stdout\n", b"second_line"]
self.mock_popen_res.stderr.read.side_effect = [b"my_stderr"]
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run("ssh", "this is input data")
assert return_code == 0
assert b_stdout == b'my_stdout\nsecond_line'
assert b_stderr == b'my_stderr'
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is True
assert self.conn._send_initial_data.call_count == 1
assert self.conn._send_initial_data.call_args[0][1] == 'this is input data'
def test_with_password(self):
# test with a password set to trigger the sshpass write
self.pc.password = '12345'
self.mock_popen_res.stdout.read.side_effect = [b"some data", b"", b""]
self.mock_popen_res.stderr.read.side_effect = [b""]
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run(["ssh", "is", "a", "cmd"], "this is more data")
assert return_code == 0
assert b_stdout == b'some data'
assert b_stderr == b''
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is True
assert self.conn._send_initial_data.call_count == 1
assert self.conn._send_initial_data.call_args[0][1] == 'this is more data'
    def _password_with_prompt_examine_output(self, source, state, b_chunk, sudoable):
if state == 'awaiting_prompt':
self.conn._flags['become_prompt'] = True
elif state == 'awaiting_escalation':
self.conn._flags['become_success'] = True
return (b'', b'')
def test_password_with_prompt(self):
# test with password prompting enabled
self.pc.password = None
self.conn.become.prompt = b'Password:'
self.conn._examine_output.side_effect = self._password_with_prompt_examine_output
self.mock_popen_res.stdout.read.side_effect = [b"Password:", b"Success", b""]
self.mock_popen_res.stderr.read.side_effect = [b""]
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ),
(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run("ssh", "this is input data")
assert return_code == 0
assert b_stdout == b''
assert b_stderr == b''
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is True
assert self.conn._send_initial_data.call_count == 1
assert self.conn._send_initial_data.call_args[0][1] == 'this is input data'
def test_password_with_become(self):
# test with some become settings
self.pc.prompt = b'Password:'
self.conn.become.prompt = b'Password:'
self.pc.become = True
self.pc.success_key = 'BECOME-SUCCESS-abcdefg'
self.conn.become._id = 'abcdefg'
self.conn._examine_output.side_effect = self._password_with_prompt_examine_output
self.mock_popen_res.stdout.read.side_effect = [b"Password:", b"BECOME-SUCCESS-abcdefg", b"abc"]
self.mock_popen_res.stderr.read.side_effect = [b"123"]
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run("ssh", "this is input data")
self.mock_popen_res.stdin.flush.assert_called_once_with()
assert return_code == 0
assert b_stdout == b'abc'
assert b_stderr == b'123'
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is True
assert self.conn._send_initial_data.call_count == 1
assert self.conn._send_initial_data.call_args[0][1] == 'this is input data'
    def test_password_without_data(self):
        # simulate no data input, with Popen failing when new ptys are used
self.mock_popen.return_value = None
self.mock_popen.side_effect = [OSError(), self.mock_popen_res]
# simulate no data input
self.mock_openpty.return_value = (98, 99)
self.mock_popen_res.stdout.read.side_effect = [b"some data", b"", b""]
self.mock_popen_res.stderr.read.side_effect = [b""]
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[]]
self.mock_selector.get_map.side_effect = lambda: True
return_code, b_stdout, b_stderr = self.conn._run("ssh", "")
assert return_code == 0
assert b_stdout == b'some data'
assert b_stderr == b''
assert self.mock_selector.register.called is True
assert self.mock_selector.register.call_count == 2
assert self.conn._send_initial_data.called is False
@pytest.mark.usefixtures('mock_run_env')
class TestSSHConnectionRetries(object):
def test_incorrect_password(self, monkeypatch):
self.conn.set_option('host_key_checking', False)
self.conn.set_option('reconnection_retries', 5)
monkeypatch.setattr('time.sleep', lambda x: None)
self.mock_popen_res.stdout.read.side_effect = [b'']
self.mock_popen_res.stderr.read.side_effect = [b'Permission denied, please try again.\r\n']
type(self.mock_popen_res).returncode = PropertyMock(side_effect=[5] * 4)
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[],
]
self.mock_selector.get_map.side_effect = lambda: True
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = [b'sshpass', b'-d41', b'ssh', b'-C']
exception_info = pytest.raises(AnsibleAuthenticationFailure, self.conn.exec_command, 'sshpass', 'some data')
assert exception_info.value.message == ('Invalid/incorrect username/password. Skipping remaining 5 retries to prevent account lockout: '
'Permission denied, please try again.')
assert self.mock_popen.call_count == 1
def test_retry_then_success(self, monkeypatch):
self.conn.set_option('host_key_checking', False)
self.conn.set_option('reconnection_retries', 3)
monkeypatch.setattr('time.sleep', lambda x: None)
self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"]
self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"]
type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 3 + [0] * 4)
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[]
]
self.mock_selector.get_map.side_effect = lambda: True
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = 'ssh'
return_code, b_stdout, b_stderr = self.conn.exec_command('ssh', 'some data')
assert return_code == 0
assert b_stdout == b'my_stdout\nsecond_line'
assert b_stderr == b'my_stderr'
def test_multiple_failures(self, monkeypatch):
self.conn.set_option('host_key_checking', False)
self.conn.set_option('reconnection_retries', 9)
monkeypatch.setattr('time.sleep', lambda x: None)
self.mock_popen_res.stdout.read.side_effect = [b""] * 10
self.mock_popen_res.stderr.read.side_effect = [b""] * 10
type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 30)
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[],
] * 10
self.mock_selector.get_map.side_effect = lambda: True
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = 'ssh'
pytest.raises(AnsibleConnectionFailure, self.conn.exec_command, 'ssh', 'some data')
assert self.mock_popen.call_count == 10
    def test_arbitrary_exceptions(self, monkeypatch):
self.conn.set_option('host_key_checking', False)
self.conn.set_option('reconnection_retries', 9)
monkeypatch.setattr('time.sleep', lambda x: None)
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = 'ssh'
self.mock_popen.side_effect = [Exception('bad')] * 10
pytest.raises(Exception, self.conn.exec_command, 'ssh', 'some data')
assert self.mock_popen.call_count == 10
def test_put_file_retries(self, monkeypatch):
self.conn.set_option('host_key_checking', False)
self.conn.set_option('reconnection_retries', 3)
monkeypatch.setattr('time.sleep', lambda x: None)
monkeypatch.setattr('ansible.plugins.connection.ssh.os.path.exists', lambda x: True)
self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"]
self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"]
type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 4 + [0] * 4)
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[]
]
self.mock_selector.get_map.side_effect = lambda: True
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = 'sftp'
return_code, b_stdout, b_stderr = self.conn.put_file('/path/to/in/file', '/path/to/dest/file')
assert return_code == 0
assert b_stdout == b"my_stdout\nsecond_line"
assert b_stderr == b"my_stderr"
assert self.mock_popen.call_count == 2
def test_fetch_file_retries(self, monkeypatch):
self.conn.set_option('host_key_checking', False)
self.conn.set_option('reconnection_retries', 3)
monkeypatch.setattr('time.sleep', lambda x: None)
monkeypatch.setattr('ansible.plugins.connection.ssh.os.path.exists', lambda x: True)
self.mock_popen_res.stdout.read.side_effect = [b"", b"my_stdout\n", b"second_line"]
self.mock_popen_res.stderr.read.side_effect = [b"", b"my_stderr"]
type(self.mock_popen_res).returncode = PropertyMock(side_effect=[255] * 4 + [0] * 4)
self.mock_selector.select.side_effect = [
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stdout, 1001, [EVENT_READ], None), EVENT_READ)],
[(SelectorKey(self.mock_popen_res.stderr, 1002, [EVENT_READ], None), EVENT_READ)],
[]
]
self.mock_selector.get_map.side_effect = lambda: True
self.conn._build_command = MagicMock()
self.conn._build_command.return_value = 'sftp'
return_code, b_stdout, b_stderr = self.conn.fetch_file('/path/to/in/file', '/path/to/dest/file')
assert return_code == 0
assert b_stdout == b"my_stdout\nsecond_line"
assert b_stderr == b"my_stderr"
assert self.mock_popen.call_count == 2
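
The _run() tests above never touch a real SSH process: subprocess.Popen and the selector are mocked, and canned read chunks drive the select loop to completion. A minimal standalone sketch of that pattern, using only unittest.mock (fake_run is an illustrative name, not an ansible API):

```python
# Drain mocked stdout through a mocked selector, the way the tests above
# drive Connection._run(); get_map() truthiness ends the loop.
from unittest.mock import MagicMock

def fake_run(proc, selector):
    out = b''
    while selector.get_map():
        for key, _event in selector.select():
            chunk = key.fileobj.read()
            if not chunk:
                selector.unregister(key.fileobj)
            out += chunk
    return out

proc = MagicMock()
proc.stdout.read.side_effect = [b'my_stdout\n', b'']
selector = MagicMock()
key = MagicMock(fileobj=proc.stdout)
selector.select.side_effect = [[(key, 1)], [(key, 1)]]
selector.get_map.side_effect = [True, True, False]
assert fake_run(proc, selector) == b'my_stdout\n'
```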
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,734 |
include_vars dir doesn't work with a temporary folder
|
### Summary
I get an error from include_vars when the input folder was created by ``tempfile`` instead of ``file``:
````
TASK [load inventories into variable] *********************************************************************************************************************
An exception occurred during task execution. To see the full traceback, use -vvv. The error was: UnboundLocalError: local variable 'failed' referenced before assignment
fatal: [master]: FAILED! => {"msg": "Unexpected failure during module execution.", "stdout": ""}
````
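That traceback is the standard Python failure mode when a name is assigned on only some branches and then read unconditionally. A minimal illustration of the error class only (not ansible code):

```python
# `failed` is assigned on one branch only, then read on every path.
def check(path_exists):
    if not path_exists:
        failed = True
    return failed  # UnboundLocalError when path_exists is True

try:
    check(True)
except UnboundLocalError as exc:
    print(exc)  # local variable 'failed' referenced before assignment
```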
but if I use this instead, it works:
````
- name: create temp folder
file:
path: /tmp/ansible-inventory-sources
state: directory
register: tmp_folder
````
the result
````
TASK [load inventories into variable] *********************************************************************************************************************
ok: [master] => {"ansible_facts": {"stuff": {"all": {"children": {"masters": {"hosts": {"master": {"ansible_connection": "local", "ansible_host": "192.168.51.180", "domain": "clustervagrant.dynamic.local", "fqdn": "ns1.clustervagrant.local"}}}, "workers": {"hosts": {"worker1": {"ansible_host": "192.168.51.181"}}, "vars": {"ansible_connection": "ssh", "ansible_ssh_pass": "vagrant", "ansible_ssh_user": "vagrant", "ansible_sudo_pass": "vagrant"}}}}, "coucou": {"allo": true}}}, "ansible_included_var_files": ["/tmp/ansible-inventory-sources/inventory-test", "/tmp/ansible-inventory-sources/inventory-vagrant"], "changed": false}
PLAY RECAP ************************************************************************************************************************************************
master : ok=8 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
````
### Issue Type
Bug Report
### Component Name
include_vars
### Ansible Version
```console
$ ansible --version
vagrant@k8s-master:/vagrant/test-templates$ ansible --version
ansible [core 2.12.1]
config file = /vagrant/test-templates/ansible.cfg
configured module search path = ['/home/vagrant/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3/dist-packages/ansible
ansible collection location = /home/vagrant/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.8.5 (default, May 27 2021, 13:30:53) [GCC 9.3.0]
jinja version = 2.10.1
libyaml = True
vagrant@k8s-master:/vagrant/test-templates$
```
### Configuration
```console
$ ansible-config dump --only-changed
vagrant@k8s-master:/vagrant/test-templates/inventory$ ansible-config dump --only-changed
vagrant@k8s-master:/vagrant/test-templates/inventory$
```
### OS / Environment
````
vagrant@k8s-master:/vagrant/test-templates/inventory$ uname -a
Linux k8s-master 5.4.0-42-generic #46-Ubuntu SMP Fri Jul 10 00:24:02 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
vagrant@k8s-master:/vagrant/test-templates/inventory$
````
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml
---
- hosts: "masters"
become: yes
become_method: sudo
become_user: root
gather_facts: yes
connection: ssh
tasks:
- name: debug through ansible.env
debug: var=ansible_env.HOME
- name: debug through ansible.env
debug: var=ansible_inventory_sources
- name: create temp folder
tempfile:
path: /tmp
prefix: ansible-inventory
state: directory
register: tmp_folder
    # WORKS IF I USE THIS BELOW TO CREATE THE FOLDER
# - name: create temp folder
# file:
# path: /tmp/ansible-inventory-sources
# state: directory
# register: tmp_folder
- name: tmp_folder
debug: var=tmp_folder
- name: create temp folder structure
shell: mkdir -p $(dirname '{{tmp_folder.path}}{{item}}')
with_items:
- "{{ansible_inventory_sources}}"
- name: file inventory used into tmp folder
copy:
src: "{{item}}"
dest: "{{tmp_folder.path}}"
with_items:
- "{{ansible_inventory_sources}}"
- name: load inventories into variable
include_vars:
dir: "{{tmp_folder.path}}"
ignore_unknown_extensions: no
extensions:
- ''
- 'yaml'
- 'yml'
- 'json'
name: stuff
```
### Expected Results
I expect to get the same result from **include_vars** whether I use a folder created by ``file`` or by ``tempfile``.
````
- name: create temp folder
file:
path: /tmp/ansible-inventory-sources
state: directory
register: tmp_folder
- name: tmp_folder
debug: var=tmp_folder
- name: create temp folder structure
shell: mkdir -p $(dirname '{{tmp_folder.path}}{{item}}')
with_items:
- "{{ansible_inventory_sources}}"
- name: file inventory used into tmp folder
copy:
src: "{{item}}"
dest: "{{tmp_folder.path}}"
with_items:
- "{{ansible_inventory_sources}}"
- name: load inventories into variable
include_vars:
dir: "{{tmp_folder.path}}"
ignore_unknown_extensions: no
extensions:
- ''
- 'yaml'
- 'yml'
- 'json'
name: stuff
````
the output
````
vagrant@k8s-master:/vagrant/test-templates$ ansible-playbook -i inventory/inventory-vagrant -i inventory/inventory-test playbooks/test-playbook.yaml -v
Using /vagrant/test-templates/ansible.cfg as config file
[WARNING]: Skipping key (allo) in group (coucou) as it is not a mapping, it is a <class 'bool'>
PLAY [masters] ********************************************************************************************************************************************
TASK [Gathering Facts] ************************************************************************************************************************************
ok: [master]
TASK [debug through ansible.env] **************************************************************************************************************************
ok: [master] => {
"ansible_env.HOME": "/root"
}
TASK [debug through ansible.env] **************************************************************************************************************************
ok: [master] => {
"ansible_inventory_sources": [
"/vagrant/test-templates/inventory/inventory-vagrant",
"/vagrant/test-templates/inventory/inventory-test"
]
}
TASK [create temp folder] *********************************************************************************************************************************
changed: [master] => {"changed": true, "gid": 0, "group": "root", "mode": "0755", "owner": "root", "path": "/tmp/ansible-inventory-sources", "size": 4096, "state": "directory", "uid": 0}
TASK [tmp_folder] *****************************************************************************************************************************************
ok: [master] => {
"tmp_folder": {
"changed": true,
"diff": {
"after": {
"path": "/tmp/ansible-inventory-sources",
"state": "directory"
},
"before": {
"path": "/tmp/ansible-inventory-sources",
"state": "absent"
}
},
"failed": false,
"gid": 0,
"group": "root",
"mode": "0755",
"owner": "root",
"path": "/tmp/ansible-inventory-sources",
"size": 4096,
"state": "directory",
"uid": 0
}
}
TASK [create temp folder structure] ***********************************************************************************************************************
changed: [master] => (item=/vagrant/test-templates/inventory/inventory-vagrant) => {"ansible_loop_var": "item", "changed": true, "cmd": "mkdir -p $(dirname '/tmp/ansible-inventory-sources/vagrant/test-templates/inventory/inventory-vagrant')", "delta": "0:00:00.012595", "end": "2022-01-11 16:10:00.823954", "item": "/vagrant/test-templates/inventory/inventory-vagrant", "msg": "", "rc": 0, "start": "2022-01-11 16:10:00.811359", "stderr": "", "stderr_lines": [], "stdout": "", "stdout_lines": []}
changed: [master] => (item=/vagrant/test-templates/inventory/inventory-test) => {"ansible_loop_var": "item", "changed": true, "cmd": "mkdir -p $(dirname '/tmp/ansible-inventory-sources/vagrant/test-templates/inventory/inventory-test')", "delta": "0:00:00.010872", "end": "2022-01-11 16:10:01.163583", "item": "/vagrant/test-templates/inventory/inventory-test", "msg": "", "rc": 0, "start": "2022-01-11 16:10:01.152711", "stderr": "", "stderr_lines": [], "stdout": "", "stdout_lines": []}
TASK [file inventory used into tmp folder] ****************************************************************************************************************
changed: [master] => (item=/vagrant/test-templates/inventory/inventory-vagrant) => {"ansible_loop_var": "item", "changed": true, "checksum": "8619f133048a1c3b62b02c25f974c19c763c144c", "dest": "/tmp/ansible-inventory-sources/inventory-vagrant", "gid": 0, "group": "root", "item": "/vagrant/test-templates/inventory/inventory-vagrant", "md5sum": "bd85cb52626fe2517fbc5aa988acf023", "mode": "0644", "owner": "root", "size": 474, "src": "/home/vagrant/.ansible/tmp/ansible-tmp-1641917401.2987561-334984-181990277684472/source", "state": "file", "uid": 0}
changed: [master] => (item=/vagrant/test-templates/inventory/inventory-test) => {"ansible_loop_var": "item", "changed": true, "checksum": "bbc1139b095a847fb831b5d128bdeb98104b694b", "dest": "/tmp/ansible-inventory-sources/inventory-test", "gid": 0, "group": "root", "item": "/vagrant/test-templates/inventory/inventory-test", "md5sum": "3bd6b415977f647d3f288bef952d9003", "mode": "0644", "owner": "root", "size": 231, "src": "/home/vagrant/.ansible/tmp/ansible-tmp-1641917402.7306056-334984-106388615338814/source", "state": "file", "uid": 0}
TASK [load inventories into variable] *********************************************************************************************************************
ok: [master] => {"ansible_facts": {"stuff": {"all": {"children": {"masters": {"hosts": {"master": {"ansible_connection": "local", "ansible_host": "192.168.51.180", "domain": "clustervagrant.dynamic.local", "fqdn": "ns1.clustervagrant.local"}}}, "workers": {"hosts": {"worker1": {"ansible_host": "192.168.51.181"}}, "vars": {"ansible_connection": "ssh", "ansible_ssh_pass": "vagrant", "ansible_ssh_user": "vagrant", "ansible_sudo_pass": "vagrant"}}}}, "coucou": {"allo": true}}}, "ansible_included_var_files": ["/tmp/ansible-inventory-sources/inventory-test", "/tmp/ansible-inventory-sources/inventory-vagrant"], "changed": false}
PLAY RECAP ************************************************************************************************************************************************
master : ok=8 changed=3 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
````
### Actual Results
```console
...
TASK [load inventories into variable] *********************************************************************************************************************
An exception occurred during task execution. To see the full traceback, use -vvv. The error was: UnboundLocalError: local variable 'failed' referenced before assignment
fatal: [master]: FAILED! => {"msg": "Unexpected failure during module execution.", "stdout": ""}
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76734
|
https://github.com/ansible/ansible/pull/76754
|
4676c08f188fb5dca98df61630c76dba1f0d2d77
|
89c884e2a2bd124b49bf9419f053f659a7d1c554
| 2022-01-11T16:25:31Z |
python
| 2022-01-18T15:04:41Z |
changelogs/fragments/include_vars_failed.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,734 |
include_vars dir doesn't work with a temporary folder
|
### Summary
I get an error from include_vars when the input folder was created by ``tempfile`` instead of ``file``:
````
TASK [load inventories into variable] *********************************************************************************************************************
An exception occurred during task execution. To see the full traceback, use -vvv. The error was: UnboundLocalError: local variable 'failed' referenced before assignment
fatal: [master]: FAILED! => {"msg": "Unexpected failure during module execution.", "stdout": ""}
````
but if I use this instead, it works:
````
- name: create temp folder
file:
path: /tmp/ansible-inventory-sources
state: directory
register: tmp_folder
````
the result
````
TASK [load inventories into variable] *********************************************************************************************************************
ok: [master] => {"ansible_facts": {"stuff": {"all": {"children": {"masters": {"hosts": {"master": {"ansible_connection": "local", "ansible_host": "192.168.51.180", "domain": "clustervagrant.dynamic.local", "fqdn": "ns1.clustervagrant.local"}}}, "workers": {"hosts": {"worker1": {"ansible_host": "192.168.51.181"}}, "vars": {"ansible_connection": "ssh", "ansible_ssh_pass": "vagrant", "ansible_ssh_user": "vagrant", "ansible_sudo_pass": "vagrant"}}}}, "coucou": {"allo": true}}}, "ansible_included_var_files": ["/tmp/ansible-inventory-sources/inventory-test", "/tmp/ansible-inventory-sources/inventory-vagrant"], "changed": false}
PLAY RECAP ************************************************************************************************************************************************
master : ok=8 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
````
### Issue Type
Bug Report
### Component Name
include_vars
### Ansible Version
```console
$ ansible --version
vagrant@k8s-master:/vagrant/test-templates$ ansible --version
ansible [core 2.12.1]
config file = /vagrant/test-templates/ansible.cfg
configured module search path = ['/home/vagrant/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3/dist-packages/ansible
ansible collection location = /home/vagrant/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.8.5 (default, May 27 2021, 13:30:53) [GCC 9.3.0]
jinja version = 2.10.1
libyaml = True
vagrant@k8s-master:/vagrant/test-templates$
```
### Configuration
```console
$ ansible-config dump --only-changed
vagrant@k8s-master:/vagrant/test-templates/inventory$ ansible-config dump --only-changed
vagrant@k8s-master:/vagrant/test-templates/inventory$
```
### OS / Environment
````
vagrant@k8s-master:/vagrant/test-templates/inventory$ uname -a
Linux k8s-master 5.4.0-42-generic #46-Ubuntu SMP Fri Jul 10 00:24:02 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
vagrant@k8s-master:/vagrant/test-templates/inventory$
````
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml
---
- hosts: "masters"
become: yes
become_method: sudo
become_user: root
gather_facts: yes
connection: ssh
tasks:
- name: debug through ansible.env
debug: var=ansible_env.HOME
- name: debug through ansible.env
debug: var=ansible_inventory_sources
- name: create temp folder
tempfile:
path: /tmp
prefix: ansible-inventory
state: directory
register: tmp_folder
    # WORKS IF I USE THIS BELOW TO CREATE THE FOLDER
# - name: create temp folder
# file:
# path: /tmp/ansible-inventory-sources
# state: directory
# register: tmp_folder
- name: tmp_folder
debug: var=tmp_folder
- name: create temp folder structure
shell: mkdir -p $(dirname '{{tmp_folder.path}}{{item}}')
with_items:
- "{{ansible_inventory_sources}}"
- name: file inventory used into tmp folder
copy:
src: "{{item}}"
dest: "{{tmp_folder.path}}"
with_items:
- "{{ansible_inventory_sources}}"
- name: load inventories into variable
include_vars:
dir: "{{tmp_folder.path}}"
ignore_unknown_extensions: no
extensions:
- ''
- 'yaml'
- 'yml'
- 'json'
name: stuff
```
### Expected Results
I expect to get the same result from **include_vars** whether I use a folder created by ``file`` or by ``tempfile``.
````
- name: create temp folder
file:
path: /tmp/ansible-inventory-sources
state: directory
register: tmp_folder
- name: tmp_folder
debug: var=tmp_folder
- name: create temp folder structure
shell: mkdir -p $(dirname '{{tmp_folder.path}}{{item}}')
with_items:
- "{{ansible_inventory_sources}}"
- name: file inventory used into tmp folder
copy:
src: "{{item}}"
dest: "{{tmp_folder.path}}"
with_items:
- "{{ansible_inventory_sources}}"
- name: load inventories into variable
include_vars:
dir: "{{tmp_folder.path}}"
ignore_unknown_extensions: no
extensions:
- ''
- 'yaml'
- 'yml'
- 'json'
name: stuff
````
the output
````
vagrant@k8s-master:/vagrant/test-templates$ ansible-playbook -i inventory/inventory-vagrant -i inventory/inventory-test playbooks/test-playbook.yaml -v
Using /vagrant/test-templates/ansible.cfg as config file
[WARNING]: Skipping key (allo) in group (coucou) as it is not a mapping, it is a <class 'bool'>
PLAY [masters] ********************************************************************************************************************************************
TASK [Gathering Facts] ************************************************************************************************************************************
ok: [master]
TASK [debug through ansible.env] **************************************************************************************************************************
ok: [master] => {
"ansible_env.HOME": "/root"
}
TASK [debug through ansible.env] **************************************************************************************************************************
ok: [master] => {
"ansible_inventory_sources": [
"/vagrant/test-templates/inventory/inventory-vagrant",
"/vagrant/test-templates/inventory/inventory-test"
]
}
TASK [create temp folder] *********************************************************************************************************************************
changed: [master] => {"changed": true, "gid": 0, "group": "root", "mode": "0755", "owner": "root", "path": "/tmp/ansible-inventory-sources", "size": 4096, "state": "directory", "uid": 0}
TASK [tmp_folder] *****************************************************************************************************************************************
ok: [master] => {
"tmp_folder": {
"changed": true,
"diff": {
"after": {
"path": "/tmp/ansible-inventory-sources",
"state": "directory"
},
"before": {
"path": "/tmp/ansible-inventory-sources",
"state": "absent"
}
},
"failed": false,
"gid": 0,
"group": "root",
"mode": "0755",
"owner": "root",
"path": "/tmp/ansible-inventory-sources",
"size": 4096,
"state": "directory",
"uid": 0
}
}
TASK [create temp folder structure] ***********************************************************************************************************************
changed: [master] => (item=/vagrant/test-templates/inventory/inventory-vagrant) => {"ansible_loop_var": "item", "changed": true, "cmd": "mkdir -p $(dirname '/tmp/ansible-inventory-sources/vagrant/test-templates/inventory/inventory-vagrant')", "delta": "0:00:00.012595", "end": "2022-01-11 16:10:00.823954", "item": "/vagrant/test-templates/inventory/inventory-vagrant", "msg": "", "rc": 0, "start": "2022-01-11 16:10:00.811359", "stderr": "", "stderr_lines": [], "stdout": "", "stdout_lines": []}
changed: [master] => (item=/vagrant/test-templates/inventory/inventory-test) => {"ansible_loop_var": "item", "changed": true, "cmd": "mkdir -p $(dirname '/tmp/ansible-inventory-sources/vagrant/test-templates/inventory/inventory-test')", "delta": "0:00:00.010872", "end": "2022-01-11 16:10:01.163583", "item": "/vagrant/test-templates/inventory/inventory-test", "msg": "", "rc": 0, "start": "2022-01-11 16:10:01.152711", "stderr": "", "stderr_lines": [], "stdout": "", "stdout_lines": []}
TASK [file inventory used into tmp folder] ****************************************************************************************************************
changed: [master] => (item=/vagrant/test-templates/inventory/inventory-vagrant) => {"ansible_loop_var": "item", "changed": true, "checksum": "8619f133048a1c3b62b02c25f974c19c763c144c", "dest": "/tmp/ansible-inventory-sources/inventory-vagrant", "gid": 0, "group": "root", "item": "/vagrant/test-templates/inventory/inventory-vagrant", "md5sum": "bd85cb52626fe2517fbc5aa988acf023", "mode": "0644", "owner": "root", "size": 474, "src": "/home/vagrant/.ansible/tmp/ansible-tmp-1641917401.2987561-334984-181990277684472/source", "state": "file", "uid": 0}
changed: [master] => (item=/vagrant/test-templates/inventory/inventory-test) => {"ansible_loop_var": "item", "changed": true, "checksum": "bbc1139b095a847fb831b5d128bdeb98104b694b", "dest": "/tmp/ansible-inventory-sources/inventory-test", "gid": 0, "group": "root", "item": "/vagrant/test-templates/inventory/inventory-test", "md5sum": "3bd6b415977f647d3f288bef952d9003", "mode": "0644", "owner": "root", "size": 231, "src": "/home/vagrant/.ansible/tmp/ansible-tmp-1641917402.7306056-334984-106388615338814/source", "state": "file", "uid": 0}
TASK [load inventories into variable] *********************************************************************************************************************
ok: [master] => {"ansible_facts": {"stuff": {"all": {"children": {"masters": {"hosts": {"master": {"ansible_connection": "local", "ansible_host": "192.168.51.180", "domain": "clustervagrant.dynamic.local", "fqdn": "ns1.clustervagrant.local"}}}, "workers": {"hosts": {"worker1": {"ansible_host": "192.168.51.181"}}, "vars": {"ansible_connection": "ssh", "ansible_ssh_pass": "vagrant", "ansible_ssh_user": "vagrant", "ansible_sudo_pass": "vagrant"}}}}, "coucou": {"allo": true}}}, "ansible_included_var_files": ["/tmp/ansible-inventory-sources/inventory-test", "/tmp/ansible-inventory-sources/inventory-vagrant"], "changed": false}
PLAY RECAP ************************************************************************************************************************************************
master : ok=8 changed=3 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
````
### Actual Results
```console
...
TASK [load inventories into variable] *********************************************************************************************************************
An exception occurred during task execution. To see the full traceback, use -vvv. The error was: UnboundLocalError: local variable 'failed' referenced before assignment
fatal: [master]: FAILED! => {"msg": "Unexpected failure during module execution.", "stdout": ""}
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76734
|
https://github.com/ansible/ansible/pull/76754
|
4676c08f188fb5dca98df61630c76dba1f0d2d77
|
89c884e2a2bd124b49bf9419f053f659a7d1c554
| 2022-01-11T16:25:31Z |
python
| 2022-01-18T15:04:41Z |
lib/ansible/plugins/action/include_vars.py
|
# Copyright: (c) 2016, Allen Sanabria <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from os import path, walk
import re
import ansible.constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native, to_text
from ansible.plugins.action import ActionBase
from ansible.utils.vars import combine_vars
class ActionModule(ActionBase):
TRANSFERS_FILES = False
VALID_FILE_EXTENSIONS = ['yaml', 'yml', 'json']
VALID_DIR_ARGUMENTS = ['dir', 'depth', 'files_matching', 'ignore_files', 'extensions', 'ignore_unknown_extensions']
VALID_FILE_ARGUMENTS = ['file', '_raw_params']
VALID_ALL = ['name', 'hash_behaviour']
def _set_dir_defaults(self):
if not self.depth:
self.depth = 0
if self.files_matching:
self.matcher = re.compile(r'{0}'.format(self.files_matching))
else:
self.matcher = None
if not self.ignore_files:
self.ignore_files = list()
if isinstance(self.ignore_files, string_types):
self.ignore_files = self.ignore_files.split()
elif isinstance(self.ignore_files, dict):
return {
'failed': True,
'message': '{0} must be a list'.format(self.ignore_files)
}
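    # Note: run() calls _set_dir_defaults() without using its return value,
    # so the error dict built above is silently discarded.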
def _set_args(self):
""" Set instance variables based on the arguments that were passed """
self.hash_behaviour = self._task.args.get('hash_behaviour', None)
self.return_results_as_name = self._task.args.get('name', None)
self.source_dir = self._task.args.get('dir', None)
self.source_file = self._task.args.get('file', None)
if not self.source_dir and not self.source_file:
self.source_file = self._task.args.get('_raw_params')
if self.source_file:
self.source_file = self.source_file.rstrip('\n')
self.depth = self._task.args.get('depth', None)
self.files_matching = self._task.args.get('files_matching', None)
self.ignore_unknown_extensions = self._task.args.get('ignore_unknown_extensions', False)
self.ignore_files = self._task.args.get('ignore_files', None)
self.valid_extensions = self._task.args.get('extensions', self.VALID_FILE_EXTENSIONS)
# convert/validate extensions list
if isinstance(self.valid_extensions, string_types):
self.valid_extensions = list(self.valid_extensions)
if not isinstance(self.valid_extensions, list):
raise AnsibleError('Invalid type for "extensions" option, it must be a list')
def run(self, tmp=None, task_vars=None):
""" Load yml files recursively from a directory.
"""
del tmp # tmp no longer has any effect
if task_vars is None:
task_vars = dict()
self.show_content = True
self.included_files = []
# Validate arguments
dirs = 0
files = 0
for arg in self._task.args:
if arg in self.VALID_DIR_ARGUMENTS:
dirs += 1
elif arg in self.VALID_FILE_ARGUMENTS:
files += 1
elif arg in self.VALID_ALL:
pass
else:
raise AnsibleError('{0} is not a valid option in include_vars'.format(to_native(arg)))
if dirs and files:
raise AnsibleError("You are mixing file only and dir only arguments, these are incompatible")
# set internal vars from args
self._set_args()
results = dict()
if self.source_dir:
self._set_dir_defaults()
self._set_root_dir()
if not path.exists(self.source_dir):
failed = True
err_msg = ('{0} directory does not exist'.format(to_native(self.source_dir)))
elif not path.isdir(self.source_dir):
failed = True
err_msg = ('{0} is not a directory'.format(to_native(self.source_dir)))
else:
for root_dir, filenames in self._traverse_dir_depth():
failed, err_msg, updated_results = (self._load_files_in_dir(root_dir, filenames))
if failed:
break
results.update(updated_results)
else:
try:
self.source_file = self._find_needle('vars', self.source_file)
failed, err_msg, updated_results = (
self._load_files(self.source_file)
)
if not failed:
results.update(updated_results)
except AnsibleError as e:
failed = True
err_msg = to_native(e)
if self.return_results_as_name:
scope = dict()
scope[self.return_results_as_name] = results
results = scope
result = super(ActionModule, self).run(task_vars=task_vars)
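        # Note: on paths where neither branch above assigned `failed`, the
        # check below raises the UnboundLocalError reported in this issue.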
if failed:
result['failed'] = failed
result['message'] = err_msg
elif self.hash_behaviour is not None and self.hash_behaviour != C.DEFAULT_HASH_BEHAVIOUR:
merge_hashes = self.hash_behaviour == 'merge'
for key, value in results.items():
old_value = task_vars.get(key, None)
results[key] = combine_vars(old_value, value, merge=merge_hashes)
result['ansible_included_var_files'] = self.included_files
result['ansible_facts'] = results
result['_ansible_no_log'] = not self.show_content
return result
def _set_root_dir(self):
if self._task._role:
if self.source_dir.split('/')[0] == 'vars':
path_to_use = (
path.join(self._task._role._role_path, self.source_dir)
)
if path.exists(path_to_use):
self.source_dir = path_to_use
else:
path_to_use = (
path.join(
self._task._role._role_path, 'vars', self.source_dir
)
)
self.source_dir = path_to_use
else:
if hasattr(self._task._ds, '_data_source'):
current_dir = (
"/".join(self._task._ds._data_source.split('/')[:-1])
)
self.source_dir = path.join(current_dir, self.source_dir)
def _traverse_dir_depth(self):
""" Recursively iterate over a directory and sort the files in
        alphabetical order. Do not iterate past the set depth.
The default depth is unlimited.
"""
current_depth = 0
sorted_walk = list(walk(self.source_dir))
sorted_walk.sort(key=lambda x: x[0])
for current_root, current_dir, current_files in sorted_walk:
current_depth += 1
if current_depth <= self.depth or self.depth == 0:
current_files.sort()
yield (current_root, current_files)
else:
break
def _ignore_file(self, filename):
""" Return True if a file matches the list of ignore_files.
Args:
filename (str): The filename that is being matched against.
Returns:
Boolean
"""
for file_type in self.ignore_files:
try:
if re.search(r'{0}$'.format(file_type), filename):
return True
except Exception:
err_msg = 'Invalid regular expression: {0}'.format(file_type)
raise AnsibleError(err_msg)
return False
def _is_valid_file_ext(self, source_file):
""" Verify if source file has a valid extension
Args:
source_file (str): The full path of source file or source file.
Returns:
Bool
"""
file_ext = path.splitext(source_file)
return bool(len(file_ext) > 1 and file_ext[-1][1:] in self.valid_extensions)
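    # Note: path.splitext() always returns a 2-tuple, so the len() check
    # above is always true; the extension membership test does the real work.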
def _load_files(self, filename, validate_extensions=False):
""" Loads a file and converts the output into a valid Python dict.
Args:
filename (str): The source file.
Returns:
Tuple (bool, str, dict)
"""
results = dict()
failed = False
err_msg = ''
if validate_extensions and not self._is_valid_file_ext(filename):
failed = True
err_msg = ('{0} does not have a valid extension: {1}'.format(to_native(filename), ', '.join(self.valid_extensions)))
else:
b_data, show_content = self._loader._get_file_contents(filename)
data = to_text(b_data, errors='surrogate_or_strict')
self.show_content = show_content
data = self._loader.load(data, file_name=filename, show_content=show_content)
if not data:
data = dict()
if not isinstance(data, dict):
failed = True
err_msg = ('{0} must be stored as a dictionary/hash'.format(to_native(filename)))
else:
self.included_files.append(filename)
results.update(data)
return failed, err_msg, results
def _load_files_in_dir(self, root_dir, var_files):
""" Load the found yml files and update/overwrite the dictionary.
Args:
root_dir (str): The base directory of the list of files that is being passed.
var_files: (list): List of files to iterate over and load into a dictionary.
Returns:
Tuple (bool, str, dict)
"""
results = dict()
failed = False
err_msg = ''
for filename in var_files:
stop_iter = False
# Never include main.yml from a role, as that is the default included by the role
if self._task._role:
if path.join(self._task._role._role_path, filename) == path.join(root_dir, 'vars', 'main.yml'):
stop_iter = True
continue
filepath = path.join(root_dir, filename)
if self.files_matching:
if not self.matcher.search(filename):
stop_iter = True
if not stop_iter and not failed:
if self.ignore_unknown_extensions:
if path.exists(filepath) and not self._ignore_file(filename) and self._is_valid_file_ext(filename):
failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)
if not failed:
results.update(loaded_data)
else:
if path.exists(filepath) and not self._ignore_file(filename):
failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)
if not failed:
results.update(loaded_data)
return failed, err_msg, results
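
Reading run() above against the traceback in this issue: `failed` is assigned only inside the dir/file branches, so any path that reaches `if failed:` without passing through one of those assignments raises the reported UnboundLocalError. A minimal guard, sketched below on the assumption that the upstream fix in PR 76754 takes roughly this shape (run_fragment and load are illustrative names, not ansible APIs):

```python
# Hedged sketch of the guard, not claimed to be the exact upstream patch:
# give the failure flags defaults before any branch runs.
def run_fragment(source_dir_given, load):
    failed = False   # default added; previously only set inside branches
    err_msg = ''
    results = {}
    if source_dir_given:
        failed, err_msg, updated = load()
        if not failed:
            results.update(updated)
    if failed:
        return {'failed': True, 'message': err_msg}
    return {'ansible_facts': results}

# A path that never reassigns `failed` no longer raises:
print(run_fragment(False, lambda: (False, '', {})))  # {'ansible_facts': {}}
```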
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,782 |
Task names on wrong tasks when strategy=free
|
### Summary
In 2.11.7 when using strategy=free, the task names that are printed for each task are often names from different tasks and not the task that is being executed.
The task names are mixed up between hosts, between lines within a role, and even between roles (task files).
Also, TASK lines are printed without any other output, not even the name of the host where the task is supposedly running.
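A toy model of the mechanism (assumed, not traced through ansible's callback code): under the free strategy each host advances through the task list independently, so a linear callback's "current banner" stops matching every incoming result:

```python
# Toy model only: print a banner whenever the task changes, then feed in
# results that arrive interleaved per host, as the free strategy allows.
results = [('hosta', 'lineinfile at 184'), ('hosta', 'yum at 190'),
           ('hostb', 'lineinfile at 184'), ('hostb', 'yum at 190')]
last_banner = None
for host, task in results:
    if task != last_banner:
        print('TASK [%s]' % task)
        last_banner = task
    print('ok: [%s]' % host)
# Each host re-triggers the banners, giving duplicated and mismatched
# TASK lines much like the log in Actual Results below.
```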
### Issue Type
Bug Report
### Component Name
pip
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.7]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/x/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /opt/venv/lib64/python3.6/site-packages/ansible
ansible collection location = /home/x/.ansible/collections:/usr/share/ansible/collections
executable location = /opt/venv/bin/ansible
python version = 3.6.8 (default, Nov 16 2020, 16:55:22) [GCC 4.8.5 20150623 (Red Hat 4.8.5-44)]
jinja version = 3.0.2
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
CALLBACKS_ENABLED(/etc/ansible/ansible.cfg) = ['ansible.posix.timer']
CONTROLLER_PYTHON_WARNING(/etc/ansible/ansible.cfg) = False
DEFAULT_FORKS(/etc/ansible/ansible.cfg) = 10
DEFAULT_STDOUT_CALLBACK(/etc/ansible/ansible.cfg) = community.general.yaml
DEFAULT_STRATEGY(/etc/ansible/ansible.cfg) = ansible.builtin.free
DEFAULT_TIMEOUT(/etc/ansible/ansible.cfg) = 30
DEFAULT_VAULT_ID_MATCH(/etc/ansible/ansible.cfg) = True
INJECT_FACTS_AS_VARS(/etc/ansible/ansible.cfg) = False
```
### OS / Environment
EL7
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
```
### Expected Results
As in 2.9, Ansible should not print useless "TASK" lines, and it should print the right task name with each task's output.
### Actual Results
```console
Example:
TASK [rolename : Install xinetd with yum] *************************************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:190
TASK [rolename : Install xinetd with yum] *************************************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:184
ok: [hostb.example.com] => changed=false
backup: ''
msg: ''
TASK [rolename : Install xinetd with yum] *************************************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:190
TASK [rolename : Install xinetd with yum] *************************************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:184
ok: [hostc.example.com] => changed=false
backup: ''
msg: ''
TASK [rolename : Install xinetd with yum] *************************************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:190
ok: [hosta.example.com] => changed=false
msg: ''
rc: 0
results:
- 2:xinetd-2.3.15-14.el7.x86_64 providing xinetd is already installed
TASK [rolename : Change line in configuration file] ***************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:196
TASK [rolename : Change line in configuration file] ***************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:190
ok: [hostb.example.com] => changed=false
msg: ''
rc: 0
results:
- 2:xinetd-2.3.15-14.el7.x86_64 providing xinetd is already installed
TASK [rolename : Change line in configuration file] ***************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:196
TASK [rolename : Change line in configuration file] ***************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:190
ok: [hostc.example.com] => changed=false
msg: ''
rc: 0
results:
- 2:xinetd-2.3.15-14.el7.x86_64 providing xinetd is already installed
TASK [rolename : Change line in configuration file] ***************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:196
ok: [hosta.example.com] => changed=false
backup: ''
msg: ''
TASK [rolename : Change another line in file] **************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:204
TASK [rolename : Change another line in file] **************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:196
ok: [hostb.example.com] => changed=false
backup: ''
msg: ''
TASK [rolename : Change another line in file] **************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:204
TASK [rolename : Change another line in file] **************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:196
ok: [hostc.example.com] => changed=false
backup: ''
msg: ''
The above log consists of 14 "TASK" lines. Most of those are wrong:
1. Correct task name for line number 190. But no hostname and no output from task, seems this line is useless.
2. Incorrect task name for line number 184; as seen from the command output, it is a "lineinfile" task named "Add line in file", not a "yum" task.
3. Repeating #1.
4. Same as #2 but different host.
5. Finally a correct task name, this is a "yum" task output.
6. Correct task name for line number 196. But no hostname and no output from task, seems this line is useless.
7. Incorrect task name for line number 190; this time it is indeed a "yum" task, but the TASK name printed is for a "lineinfile" task.
8. Correct task name for line number 196. But no hostname and no output from task, seems this line is useless.
9. Same as #7.
10. Finally a correct task name, this is a "lineinfile" task output.
11. Correct task name for line number 204. But no hostname and no output from task, seems this line is useless.
12. Incorrect task name for line number 196.
13. Correct task name for line number 204. But no hostname and no output from task, seems this line is useless.
14. Incorrect task name for line number 196.
Out of 14 (!!!) "TASK" lines and all the other lines that go with them, only:
- 3 occurrences had the right task name for the line number and carried some useful information (host name, task output).
- 6 lines (plus the accompanying role file + line number and empty line) seemed useless.
This makes Ansible's output hugely long with all the useless three-line combinations of TASK + file/line number + empty line.
Not to mention I have to manually decode each file + line number because the task name printed is often wrong.
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76782
|
https://github.com/ansible/ansible/pull/76787
|
6de06377d0671fa9b3f19241dcacf9dbb093fcd8
|
29bdb8bf1e1ab7ca35721dad1a58efc966d56bd4
| 2022-01-18T11:18:44Z |
python
| 2022-01-18T19:57:42Z |
changelogs/fragments/76782-fqcn-compare-lockstep-strategies.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,782 |
Task names on wrong tasks when strategy=free
|
### Summary
In 2.11.7, when using strategy=free, the task names printed for each task are often the names of different tasks rather than of the task actually being executed.
The task names are mixed up between hosts, between lines within a role, and even between roles (task files).
Also, TASK lines are printed without any other output, not even the name of the host where the supposed task is running.
### Issue Type
Bug Report
### Component Name
pip
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.7]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/x/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /opt/venv/lib64/python3.6/site-packages/ansible
ansible collection location = /home/x/.ansible/collections:/usr/share/ansible/collections
executable location = /opt/venv/bin/ansible
python version = 3.6.8 (default, Nov 16 2020, 16:55:22) [GCC 4.8.5 20150623 (Red Hat 4.8.5-44)]
jinja version = 3.0.2
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
CALLBACKS_ENABLED(/etc/ansible/ansible.cfg) = ['ansible.posix.timer']
CONTROLLER_PYTHON_WARNING(/etc/ansible/ansible.cfg) = False
DEFAULT_FORKS(/etc/ansible/ansible.cfg) = 10
DEFAULT_STDOUT_CALLBACK(/etc/ansible/ansible.cfg) = community.general.yaml
DEFAULT_STRATEGY(/etc/ansible/ansible.cfg) = ansible.builtin.free
DEFAULT_TIMEOUT(/etc/ansible/ansible.cfg) = 30
DEFAULT_VAULT_ID_MATCH(/etc/ansible/ansible.cfg) = True
INJECT_FACTS_AS_VARS(/etc/ansible/ansible.cfg) = False
```
### OS / Environment
EL7
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
```
### Expected Results
As in 2.9, Ansible should not print useless "TASK" lines and should print the right task name to go with each task's output.
### Actual Results
```console
Example:
TASK [rolename : Install xinetd with yum] *************************************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:190
TASK [rolename : Install xinetd with yum] *************************************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:184
ok: [hostb.example.com] => changed=false
backup: ''
msg: ''
TASK [rolename : Install xinetd with yum] *************************************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:190
TASK [rolename : Install xinetd with yum] *************************************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:184
ok: [hostc.example.com] => changed=false
backup: ''
msg: ''
TASK [rolename : Install xinetd with yum] *************************************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:190
ok: [hosta.example.com] => changed=false
msg: ''
rc: 0
results:
- 2:xinetd-2.3.15-14.el7.x86_64 providing xinetd is already installed
TASK [rolename : Change line in configuration file] ***************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:196
TASK [rolename : Change line in configuration file] ***************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:190
ok: [hostb.example.com] => changed=false
msg: ''
rc: 0
results:
- 2:xinetd-2.3.15-14.el7.x86_64 providing xinetd is already installed
TASK [rolename : Change line in configuration file] ***************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:196
TASK [rolename : Change line in configuration file] ***************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:190
ok: [hostc.example.com] => changed=false
msg: ''
rc: 0
results:
- 2:xinetd-2.3.15-14.el7.x86_64 providing xinetd is already installed
TASK [rolename : Change line in configuration file] ***************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:196
ok: [hosta.example.com] => changed=false
backup: ''
msg: ''
TASK [rolename : Change another line in file] **************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:204
TASK [rolename : Change another line in file] **************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:196
ok: [hostb.example.com] => changed=false
backup: ''
msg: ''
TASK [rolename : Change another line in file] **************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:204
TASK [rolename : Change another line in file] **************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:196
ok: [hostc.example.com] => changed=false
backup: ''
msg: ''
The above log consists of 14 "TASK" lines. Most of those are wrong:
1. Correct task name for line number 190. But no hostname and no output from task, seems this line is useless.
2. Incorrect task name for line number 184; as seen from the command output, it is a "lineinfile" task named "Add line in file", not a "yum" task.
3. Repeating #1.
4. Same as #2 but different host.
5. Finally a correct task name, this is a "yum" task output.
6. Correct task name for line number 196. But no hostname and no output from task, seems this line is useless.
7. Incorrect task name for line number 190; this time it is indeed a "yum" task, but the TASK name printed is for a "lineinfile" task.
8. Correct task name for line number 196. But no hostname and no output from task, seems this line is useless.
9. Same as #7.
10. Finally a correct task name, this is a "lineinfile" task output.
11. Correct task name for line number 204. But no hostname and no output from task, seems this line is useless.
12. Incorrect task name for line number 196.
13. Correct task name for line number 204. But no hostname and no output from task, seems this line is useless.
14. Incorrect task name for line number 196.
Out of 14 (!!!) "TASK" lines and all the other lines that go with them, only:
- 3 occurrences had the right task name for the line number and carried some useful information (host name, task output).
- 6 lines (plus the accompanying role file + line number and empty line) seemed useless.
This makes Ansible's output hugely long with all the useless three-line combinations of TASK + file/line number + empty line.
Not to mention I have to manually decode each file + line number because the task name printed is often wrong.
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76782
|
https://github.com/ansible/ansible/pull/76787
|
6de06377d0671fa9b3f19241dcacf9dbb093fcd8
|
29bdb8bf1e1ab7ca35721dad1a58efc966d56bd4
| 2022-01-18T11:18:44Z |
python
| 2022-01-18T19:57:42Z |
lib/ansible/plugins/callback/default.py
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: default
type: stdout
short_description: default Ansible screen output
version_added: historical
description:
- This is the default output callback for ansible-playbook.
extends_documentation_fragment:
- default_callback
- result_format_callback
requirements:
- set as stdout in configuration
'''
from ansible import constants as C
from ansible import context
from ansible.playbook.task_include import TaskInclude
from ansible.plugins.callback import CallbackBase
from ansible.utils.color import colorize, hostcolor
# These values use ansible.constants for historical reasons, mostly to allow
# unmodified derivative plugins to work. However, newer options added to the
# plugin are not also added to ansible.constants, so authors of derivative
# callback plugins will eventually need to add a reference to the common docs
# fragment for the 'default' callback plugin
# these are used to provide backwards compat with old plugins that subclass from default
# but still don't use the new config system and/or fail to document the options
# TODO: Change the default of check_mode_markers to True in a future release (2.13)
COMPAT_OPTIONS = (('display_skipped_hosts', C.DISPLAY_SKIPPED_HOSTS),
('display_ok_hosts', True),
('show_custom_stats', C.SHOW_CUSTOM_STATS),
('display_failed_stderr', False),
('check_mode_markers', False),
('show_per_host_start', False))
class CallbackModule(CallbackBase):
'''
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'default'
def __init__(self):
self._play = None
self._last_task_banner = None
self._last_task_name = None
self._task_type_cache = {}
super(CallbackModule, self).__init__()
def set_options(self, task_keys=None, var_options=None, direct=None):
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
# for backwards compat with plugins subclassing default, fallback to constants
for option, constant in COMPAT_OPTIONS:
try:
value = self.get_option(option)
except (AttributeError, KeyError):
self._display.deprecated("'%s' is subclassing DefaultCallback without the corresponding doc_fragment." % self._load_name,
version='2.14', collection_name='ansible.builtin')
value = constant
setattr(self, option, value)
def v2_runner_on_failed(self, result, ignore_errors=False):
host_label = self.host_label(result)
self._clean_results(result._result, result._task.action)
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
self._handle_exception(result._result, use_stderr=self.display_failed_stderr)
self._handle_warnings(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
if self._display.verbosity < 2 and self.get_option('show_task_path_on_failure'):
self._print_task_path(result._task)
msg = "fatal: [%s]: FAILED! => %s" % (host_label, self._dump_results(result._result))
self._display.display(msg, color=C.COLOR_ERROR, stderr=self.display_failed_stderr)
if ignore_errors:
self._display.display("...ignoring", color=C.COLOR_SKIP)
def v2_runner_on_ok(self, result):
host_label = self.host_label(result)
if isinstance(result._task, TaskInclude):
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
return
elif result._result.get('changed', False):
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
msg = "changed: [%s]" % (host_label,)
color = C.COLOR_CHANGED
else:
if not self.display_ok_hosts:
return
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
msg = "ok: [%s]" % (host_label,)
color = C.COLOR_OK
self._handle_warnings(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
self._clean_results(result._result, result._task.action)
if self._run_is_verbose(result):
msg += " => %s" % (self._dump_results(result._result),)
self._display.display(msg, color=color)
def v2_runner_on_skipped(self, result):
if self.display_skipped_hosts:
self._clean_results(result._result, result._task.action)
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
msg = "skipping: [%s]" % result._host.get_name()
if self._run_is_verbose(result):
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_SKIP)
def v2_runner_on_unreachable(self, result):
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
host_label = self.host_label(result)
msg = "fatal: [%s]: UNREACHABLE! => %s" % (host_label, self._dump_results(result._result))
self._display.display(msg, color=C.COLOR_UNREACHABLE, stderr=self.display_failed_stderr)
def v2_playbook_on_no_hosts_matched(self):
self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP)
def v2_playbook_on_no_hosts_remaining(self):
self._display.banner("NO MORE HOSTS LEFT")
def v2_playbook_on_task_start(self, task, is_conditional):
self._task_start(task, prefix='TASK')
def _task_start(self, task, prefix=None):
# Cache output prefix for task if provided
# This is needed to properly display 'RUNNING HANDLER' and similar
# when hiding skipped/ok task results
if prefix is not None:
self._task_type_cache[task._uuid] = prefix
# Preserve task name, as all vars may not be available for templating
# when we need it later
if self._play.strategy in ('free', 'host_pinned'):
# Explicitly set to None for strategy free/host_pinned to account for any cached
# task title from a previous non-free play
self._last_task_name = None
else:
self._last_task_name = task.get_name().strip()
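        # NOTE (editorial sketch, not part of the shipped file): the membership
        # test above misses fully-qualified strategy names such as
        # 'ansible.builtin.free' (the reporter's DEFAULT_STRATEGY), so the else
        # branch caches a task name that free-strategy result handlers later
        # reuse for a different task. The changelog fragment for PR 76787
        # ('76782-fqcn-compare-lockstep-strategies') points at comparing
        # normalized names; one assumed, minimal form of that check:
        #
        #     strategy = self._play.strategy.rsplit('.', 1)[-1]
        #     if strategy in ('free', 'host_pinned'):
        #         self._last_task_name = None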
# Display the task banner immediately if we're not doing any filtering based on task result
if self.display_skipped_hosts and self.display_ok_hosts:
self._print_task_banner(task)
def _print_task_banner(self, task):
# args can be specified as no_log in several places: in the task or in
# the argument spec. We can check whether the task is no_log but the
# argument spec can't be because that is only run on the target
        # machine and we haven't run it there yet at this time.
#
# So we give people a config option to affect display of the args so
# that they can secure this if they feel that their stdout is insecure
# (shoulder surfing, logging stdout straight to a file, etc).
args = ''
if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
args = u', '.join(u'%s=%s' % a for a in task.args.items())
args = u' %s' % args
prefix = self._task_type_cache.get(task._uuid, 'TASK')
# Use cached task name
task_name = self._last_task_name
if task_name is None:
task_name = task.get_name().strip()
if task.check_mode and self.check_mode_markers:
checkmsg = " [CHECK MODE]"
else:
checkmsg = ""
self._display.banner(u"%s [%s%s]%s" % (prefix, task_name, args, checkmsg))
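        # e.g. this renders as:
        #   TASK [rolename : Install xinetd with yum] *********************
        # where the trailing asterisks come from Display.banner padding the
        # line out to the terminal width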
if self._display.verbosity >= 2:
self._print_task_path(task)
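        # Remember which task's banner was printed last; the v2_runner_* result
        # hooks compare this uuid and re-print the banner whenever results from
        # a different task arrive (as happens when tasks interleave under the
        # free strategy).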
self._last_task_banner = task._uuid
def v2_playbook_on_cleanup_task_start(self, task):
self._task_start(task, prefix='CLEANUP TASK')
def v2_playbook_on_handler_task_start(self, task):
self._task_start(task, prefix='RUNNING HANDLER')
def v2_runner_on_start(self, host, task):
if self.get_option('show_per_host_start'):
self._display.display(" [started %s on %s]" % (task, host), color=C.COLOR_OK)
def v2_playbook_on_play_start(self, play):
name = play.get_name().strip()
if play.check_mode and self.check_mode_markers:
checkmsg = " [CHECK MODE]"
else:
checkmsg = ""
if not name:
msg = u"PLAY%s" % checkmsg
else:
msg = u"PLAY [%s]%s" % (name, checkmsg)
self._play = play
self._display.banner(msg)
def v2_on_file_diff(self, result):
if result._task.loop and 'results' in result._result:
for res in result._result['results']:
if 'diff' in res and res['diff'] and res.get('changed', False):
diff = self._get_diff(res['diff'])
if diff:
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
self._display.display(diff)
elif 'diff' in result._result and result._result['diff'] and result._result.get('changed', False):
diff = self._get_diff(result._result['diff'])
if diff:
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
self._display.display(diff)
def v2_runner_item_on_ok(self, result):
host_label = self.host_label(result)
if isinstance(result._task, TaskInclude):
return
elif result._result.get('changed', False):
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
msg = 'changed'
color = C.COLOR_CHANGED
else:
if not self.display_ok_hosts:
return
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
msg = 'ok'
color = C.COLOR_OK
msg = "%s: [%s] => (item=%s)" % (msg, host_label, self._get_item_label(result._result))
self._clean_results(result._result, result._task.action)
if self._run_is_verbose(result):
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=color)
def v2_runner_item_on_failed(self, result):
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
host_label = self.host_label(result)
self._clean_results(result._result, result._task.action)
self._handle_exception(result._result, use_stderr=self.display_failed_stderr)
msg = "failed: [%s]" % (host_label,)
self._handle_warnings(result._result)
self._display.display(
msg + " (item=%s) => %s" % (self._get_item_label(result._result), self._dump_results(result._result)),
color=C.COLOR_ERROR,
stderr=self.display_failed_stderr
)
def v2_runner_item_on_skipped(self, result):
if self.display_skipped_hosts:
if self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
self._clean_results(result._result, result._task.action)
msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), self._get_item_label(result._result))
if self._run_is_verbose(result):
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_SKIP)
def v2_playbook_on_include(self, included_file):
msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts]))
label = self._get_item_label(included_file._vars)
if label:
msg += " => (item=%s)" % label
self._display.display(msg, color=C.COLOR_SKIP)
def v2_playbook_on_stats(self, stats):
self._display.banner("PLAY RECAP")
hosts = sorted(stats.processed.keys())
for h in hosts:
t = stats.summarize(h)
self._display.display(
u"%s : %s %s %s %s %s %s %s" % (
hostcolor(h, t),
colorize(u'ok', t['ok'], C.COLOR_OK),
colorize(u'changed', t['changed'], C.COLOR_CHANGED),
colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
colorize(u'failed', t['failures'], C.COLOR_ERROR),
colorize(u'skipped', t['skipped'], C.COLOR_SKIP),
colorize(u'rescued', t['rescued'], C.COLOR_OK),
colorize(u'ignored', t['ignored'], C.COLOR_WARN),
),
screen_only=True
)
self._display.display(
u"%s : %s %s %s %s %s %s %s" % (
hostcolor(h, t, False),
colorize(u'ok', t['ok'], None),
colorize(u'changed', t['changed'], None),
colorize(u'unreachable', t['unreachable'], None),
colorize(u'failed', t['failures'], None),
colorize(u'skipped', t['skipped'], None),
colorize(u'rescued', t['rescued'], None),
colorize(u'ignored', t['ignored'], None),
),
log_only=True
)
self._display.display("", screen_only=True)
# print custom stats if required
if stats.custom and self.show_custom_stats:
self._display.banner("CUSTOM STATS: ")
# per host
# TODO: come up with 'pretty format'
for k in sorted(stats.custom.keys()):
if k == '_run':
continue
self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
# print per run custom stats
if '_run' in stats.custom:
self._display.display("", screen_only=True)
self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
self._display.display("", screen_only=True)
if context.CLIARGS['check'] and self.check_mode_markers:
self._display.banner("DRY RUN")
def v2_playbook_on_start(self, playbook):
if self._display.verbosity > 1:
from os.path import basename
self._display.banner("PLAYBOOK: %s" % basename(playbook._file_name))
# show CLI arguments
if self._display.verbosity > 3:
if context.CLIARGS.get('args'):
self._display.display('Positional arguments: %s' % ' '.join(context.CLIARGS['args']),
color=C.COLOR_VERBOSE, screen_only=True)
for argument in (a for a in context.CLIARGS if a != 'args'):
val = context.CLIARGS[argument]
if val:
self._display.display('%s: %s' % (argument, val), color=C.COLOR_VERBOSE, screen_only=True)
if context.CLIARGS['check'] and self.check_mode_markers:
self._display.banner("DRY RUN")
def v2_runner_retry(self, result):
task_name = result.task_name or result._task
host_label = self.host_label(result)
msg = "FAILED - RETRYING: [%s]: %s (%d retries left)." % (host_label, task_name, result._result['retries'] - result._result['attempts'])
if self._run_is_verbose(result, verbosity=2):
msg += "Result was: %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_DEBUG)
def v2_runner_on_async_poll(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
started = result._result.get('started')
finished = result._result.get('finished')
self._display.display(
'ASYNC POLL on %s: jid=%s started=%s finished=%s' % (host, jid, started, finished),
color=C.COLOR_DEBUG
)
def v2_runner_on_async_ok(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
self._display.display("ASYNC OK on %s: jid=%s" % (host, jid), color=C.COLOR_DEBUG)
def v2_runner_on_async_failed(self, result):
host = result._host.get_name()
# Attempt to get the async job ID. If the job does not finish before the
# async timeout value, the ID may be within the unparsed 'async_result' dict.
jid = result._result.get('ansible_job_id')
if not jid and 'async_result' in result._result:
jid = result._result['async_result'].get('ansible_job_id')
self._display.display("ASYNC FAILED on %s: jid=%s" % (host, jid), color=C.COLOR_DEBUG)
def v2_playbook_on_notify(self, handler, host):
if self._display.verbosity > 1:
self._display.display("NOTIFIED HANDLER %s for %s" % (handler.get_name(), host), color=C.COLOR_VERBOSE, screen_only=True)
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,782 |
Task names on wrong tasks when strategy=free
|
### Summary
In 2.11.7, when using strategy=free, the task names printed for each task are often the names of different tasks rather than of the task actually being executed.
The task names are mixed up between hosts, between lines within a role, and even between roles (task files).
Also, TASK lines are printed without any other output, not even the name of the host where the supposed task is running.
### Issue Type
Bug Report
### Component Name
pip
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.7]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/x/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /opt/venv/lib64/python3.6/site-packages/ansible
ansible collection location = /home/x/.ansible/collections:/usr/share/ansible/collections
executable location = /opt/venv/bin/ansible
python version = 3.6.8 (default, Nov 16 2020, 16:55:22) [GCC 4.8.5 20150623 (Red Hat 4.8.5-44)]
jinja version = 3.0.2
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
CALLBACKS_ENABLED(/etc/ansible/ansible.cfg) = ['ansible.posix.timer']
CONTROLLER_PYTHON_WARNING(/etc/ansible/ansible.cfg) = False
DEFAULT_FORKS(/etc/ansible/ansible.cfg) = 10
DEFAULT_STDOUT_CALLBACK(/etc/ansible/ansible.cfg) = community.general.yaml
DEFAULT_STRATEGY(/etc/ansible/ansible.cfg) = ansible.builtin.free
DEFAULT_TIMEOUT(/etc/ansible/ansible.cfg) = 30
DEFAULT_VAULT_ID_MATCH(/etc/ansible/ansible.cfg) = True
INJECT_FACTS_AS_VARS(/etc/ansible/ansible.cfg) = False
```
### OS / Environment
EL7
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
```
### Expected Results
As in 2.9, Ansible should not print useless "TASK" lines and should print the right task name to go with each task's output.
### Actual Results
```console
Example:
TASK [rolename : Install xinetd with yum] *************************************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:190
TASK [rolename : Install xinetd with yum] *************************************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:184
ok: [hostb.example.com] => changed=false
backup: ''
msg: ''
TASK [rolename : Install xinetd with yum] *************************************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:190
TASK [rolename : Install xinetd with yum] *************************************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:184
ok: [hostc.example.com] => changed=false
backup: ''
msg: ''
TASK [rolename : Install xinetd with yum] *************************************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:190
ok: [hosta.example.com] => changed=false
msg: ''
rc: 0
results:
- 2:xinetd-2.3.15-14.el7.x86_64 providing xinetd is already installed
TASK [rolename : Change line in configuration file] ***************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:196
TASK [rolename : Change line in configuration file] ***************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:190
ok: [hostb.example.com] => changed=false
msg: ''
rc: 0
results:
- 2:xinetd-2.3.15-14.el7.x86_64 providing xinetd is already installed
TASK [rolename : Change line in configuration file] ***************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:196
TASK [rolename : Change line in configuration file] ***************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:190
ok: [hostc.example.com] => changed=false
msg: ''
rc: 0
results:
- 2:xinetd-2.3.15-14.el7.x86_64 providing xinetd is already installed
TASK [rolename : Change line in configuration file] ***************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:196
ok: [hosta.example.com] => changed=false
backup: ''
msg: ''
TASK [rolename : Change another line in file] **************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:204
TASK [rolename : Change another line in file] **************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:196
ok: [hostb.example.com] => changed=false
backup: ''
msg: ''
TASK [rolename : Change another line in file] **************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:204
TASK [rolename : Change another line in file] **************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:196
ok: [hostc.example.com] => changed=false
backup: ''
msg: ''
The above log consists of 14 "TASK" lines. Most of those are wrong:
1. Correct task name for line number 190. But no hostname and no output from task, seems this line is useless.
2. Incorrect task name for line number 184; as seen from the command output, it is a "lineinfile" task named "Add line in file", not a "yum" task.
3. Repeating #1.
4. Same as #2 but different host.
5. Finally a correct task name, this is a "yum" task output.
6. Correct task name for line number 196. But no hostname and no output from task, seems this line is useless.
7. Incorrect task name for line number 190; this time it is indeed a "yum" task, but the TASK name printed is for a "lineinfile" task.
8. Correct task name for line number 196. But no hostname and no output from task, seems this line is useless.
9. Same as #7.
10. Finally a correct task name, this is a "lineinfile" task output.
11. Correct task name for line number 204. But no hostname and no output from task, seems this line is useless.
12. Incorrect task name for line number 196.
13. Correct task name for line number 204. But no hostname and no output from task, seems this line is useless.
14. Incorrect task name for line number 196.
Out of 14 (!!!) "TASK" lines and all the other lines that go with them, only:
- 3 occurrences had the right task name for the line number and carried some useful information (host name, task output).
- 6 lines (plus the accompanying role file + line number and empty line) seemed useless.
This makes Ansible's output hugely long with all the useless three-line combinations of TASK + file/line number + empty line.
Not to mention I have to manually decode each file + line number because the task name printed is often wrong.
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76782
|
https://github.com/ansible/ansible/pull/76787
|
6de06377d0671fa9b3f19241dcacf9dbb093fcd8
|
29bdb8bf1e1ab7ca35721dad1a58efc966d56bd4
| 2022-01-18T11:18:44Z |
python
| 2022-01-18T19:57:42Z |
test/integration/targets/callback_default/callback_default.out.fqcn_free.stdout
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,782 |
Task names on wrong tasks when strategy=free
|
### Summary
In 2.11.7, when using strategy=free, the task names printed for each task are often the names of different tasks rather than of the task actually being executed.
The task names are mixed up between hosts, between lines within a role, and even between roles (task files).
Also, TASK lines are printed without any other output, not even the name of the host where the supposed task is running.
### Issue Type
Bug Report
### Component Name
pip
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.7]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/x/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /opt/venv/lib64/python3.6/site-packages/ansible
ansible collection location = /home/x/.ansible/collections:/usr/share/ansible/collections
executable location = /opt/venv/bin/ansible
python version = 3.6.8 (default, Nov 16 2020, 16:55:22) [GCC 4.8.5 20150623 (Red Hat 4.8.5-44)]
jinja version = 3.0.2
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
CALLBACKS_ENABLED(/etc/ansible/ansible.cfg) = ['ansible.posix.timer']
CONTROLLER_PYTHON_WARNING(/etc/ansible/ansible.cfg) = False
DEFAULT_FORKS(/etc/ansible/ansible.cfg) = 10
DEFAULT_STDOUT_CALLBACK(/etc/ansible/ansible.cfg) = community.general.yaml
DEFAULT_STRATEGY(/etc/ansible/ansible.cfg) = ansible.builtin.free
DEFAULT_TIMEOUT(/etc/ansible/ansible.cfg) = 30
DEFAULT_VAULT_ID_MATCH(/etc/ansible/ansible.cfg) = True
INJECT_FACTS_AS_VARS(/etc/ansible/ansible.cfg) = False
```
### OS / Environment
EL7
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
```
### Expected Results
As in 2.9, Ansible should not print useless "TASK" lines and should print the right task name to go with each task's output.
### Actual Results
```console
Example:
TASK [rolename : Install xinetd with yum] *************************************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:190
TASK [rolename : Install xinetd with yum] *************************************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:184
ok: [hostb.example.com] => changed=false
backup: ''
msg: ''
TASK [rolename : Install xinetd with yum] *************************************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:190
TASK [rolename : Install xinetd with yum] *************************************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:184
ok: [hostc.example.com] => changed=false
backup: ''
msg: ''
TASK [rolename : Install xinetd with yum] *************************************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:190
ok: [hosta.example.com] => changed=false
msg: ''
rc: 0
results:
- 2:xinetd-2.3.15-14.el7.x86_64 providing xinetd is already installed
TASK [rolename : Change line in configuration file] ***************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:196
TASK [rolename : Change line in configuration file] ***************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:190
ok: [hostb.example.com] => changed=false
msg: ''
rc: 0
results:
- 2:xinetd-2.3.15-14.el7.x86_64 providing xinetd is already installed
TASK [rolename : Change line in configuration file] ***************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:196
TASK [rolename : Change line in configuration file] ***************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:190
ok: [hostc.example.com] => changed=false
msg: ''
rc: 0
results:
- 2:xinetd-2.3.15-14.el7.x86_64 providing xinetd is already installed
TASK [rolename : Change line in configuration file] ***************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:196
ok: [hosta.example.com] => changed=false
backup: ''
msg: ''
TASK [rolename : Change another line in file] **************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:204
TASK [rolename : Change another line in file] **************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:196
ok: [hostb.example.com] => changed=false
backup: ''
msg: ''
TASK [rolename : Change another line in file] **************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:204
TASK [rolename : Change another line in file] **************************************************************************************************************
task path: /home/x/roles/rolename/tasks/main.yml:196
ok: [hostc.example.com] => changed=false
backup: ''
msg: ''
The above log consists of 14 "TASK" lines. Most of those are wrong:
1. Correct task name for line number 190. But no hostname and no output from task, seems this line is useless.
2. Incorrect task name for line number 184; as seen from the command output, it is a "lineinfile" task named "Add line in file", not a "yum" task.
3. Repeating #1.
4. Same as #2 but different host.
5. Finally a correct task name, this is a "yum" task output.
6. Correct task name for line number 196. But no hostname and no output from task, seems this line is useless.
7. Incorrect task name for line number 190; this time it is indeed a "yum" task, but the TASK name printed is for a "lineinfile" task.
8. Correct task name for line number 196. But no hostname and no output from task, seems this line is useless.
9. Same as #7.
10. Finally a correct task name, this is a "lineinfile" task output.
11. Correct task name for line number 204. But no hostname and no output from task, seems this line is useless.
12. Incorrect task name for line number 196.
13. Correct task name for line number 204. But no hostname and no output from task, seems this line is useless.
14. Incorrect task name for line number 196.
Out of 14 (!!!) "TASK" lines and all the other lines that go with them, only:
- 3 occurrences had the right task name for the line number and carried some useful information (host name, task output).
- 6 lines (plus the accompanying role file + line number and empty line) seemed useless.
This makes Ansible's output hugely long with all the useless three-line combinations of TASK + file/line number + empty line.
Not to mention I have to manually decode each file + line number because the task name printed is often wrong.
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76782
|
https://github.com/ansible/ansible/pull/76787
|
6de06377d0671fa9b3f19241dcacf9dbb093fcd8
|
29bdb8bf1e1ab7ca35721dad1a58efc966d56bd4
| 2022-01-18T11:18:44Z |
python
| 2022-01-18T19:57:42Z |
test/integration/targets/callback_default/runme.sh
|
#!/usr/bin/env bash
# This test compares "known good" output with various settings against output
# with the current code. It's brittle by nature, but this is probably the
# "best" approach possible.
#
# Notes:
# * options passed to this script (such as -v) are ignored, as they would change
# the output and break the test
# * the number of asterisks after a "banner" differs depending on the number of
# columns on the TTY, so we must adjust the columns for the current session
# for consistency
set -eux
run_test() {
local testname=$1
local playbook=$2
# output was recorded w/o cowsay, ensure we reproduce the same
export ANSIBLE_NOCOWS=1
# The shenanigans with redirection and 'tee' are to capture STDOUT and
# STDERR separately while still displaying both to the console
{ ansible-playbook -i inventory "$playbook" "${@:3}" \
> >(set +x; tee "${OUTFILE}.${testname}.stdout"); } \
2> >(set +x; tee "${OUTFILE}.${testname}.stderr" >&2)
    # Scrub the deprecation warning that shows up in Python 2.6 on CentOS 6
sed -i -e '/RandomPool_DeprecationWarning/d' "${OUTFILE}.${testname}.stderr"
sed -i -e 's/included: .*\/test\/integration/included: ...\/test\/integration/g' "${OUTFILE}.${testname}.stdout"
sed -i -e 's/@@ -1,1 +1,1 @@/@@ -1 +1 @@/g' "${OUTFILE}.${testname}.stdout"
sed -i -e 's/: .*\/test_diff\.txt/: ...\/test_diff.txt/g' "${OUTFILE}.${testname}.stdout"
sed -i -e "s#${ANSIBLE_PLAYBOOK_DIR}#TEST_PATH#g" "${OUTFILE}.${testname}.stdout"
sed -i -e 's/^Using .*//g' "${OUTFILE}.${testname}.stdout"
sed -i -e 's/[0-9]:[0-9]\{2\}:[0-9]\{2\}\.[0-9]\{6\}/0:00:00.000000/g' "${OUTFILE}.${testname}.stdout"
sed -i -e 's/[0-9]\{4\}-[0-9]\{2\}-[0-9]\{2\} [0-9]\{2\}:[0-9]\{2\}:[0-9]\{2\}\.[0-9]\{6\}/0000-00-00 00:00:00.000000/g' "${OUTFILE}.${testname}.stdout"
sed -i -e 's#: .*/source$#: .../source#g' "${OUTFILE}.${testname}.stdout"
sed -i -e '/secontext:/d' "${OUTFILE}.${testname}.stdout"
sed -i -e 's/group: wheel/group: root/g' "${OUTFILE}.${testname}.stdout"
diff -u "${ORIGFILE}.${testname}.stdout" "${OUTFILE}.${testname}.stdout" || diff_failure
diff -u "${ORIGFILE}.${testname}.stderr" "${OUTFILE}.${testname}.stderr" || diff_failure
}
run_test_dryrun() {
local testname=$1
# optional, pass --check to run a dry run
local chk=${2:-}
    # output was recorded w/o cowsay, ensure we reproduce the same
export ANSIBLE_NOCOWS=1
    # This is needed to satisfy shellcheck, which cannot accept an unquoted variable
cmd="ansible-playbook -i inventory ${chk} test_dryrun.yml"
# The shenanigans with redirection and 'tee' are to capture STDOUT and
# STDERR separately while still displaying both to the console
{ $cmd \
> >(set +x; tee "${OUTFILE}.${testname}.stdout"); } \
2> >(set +x; tee "${OUTFILE}.${testname}.stderr" >&2)
    # Scrub the deprecation warning that shows up in Python 2.6 on CentOS 6
sed -i -e '/RandomPool_DeprecationWarning/d' "${OUTFILE}.${testname}.stderr"
diff -u "${ORIGFILE}.${testname}.stdout" "${OUTFILE}.${testname}.stdout" || diff_failure
diff -u "${ORIGFILE}.${testname}.stderr" "${OUTFILE}.${testname}.stderr" || diff_failure
}
diff_failure() {
if [[ $INIT = 0 ]]; then
echo "FAILURE...diff mismatch!"
exit 1
fi
}
cleanup() {
if [[ $INIT = 0 ]]; then
rm -rf "${OUTFILE}.*"
fi
if [[ -f "${BASEFILE}.unreachable.stdout" ]]; then
rm -rf "${BASEFILE}.unreachable.stdout"
fi
if [[ -f "${BASEFILE}.unreachable.stderr" ]]; then
rm -rf "${BASEFILE}.unreachable.stderr"
fi
# Restore TTY cols
if [[ -n ${TTY_COLS:-} ]]; then
stty cols "${TTY_COLS}"
fi
}
adjust_tty_cols() {
if [[ -t 1 ]]; then
# Preserve existing TTY cols
TTY_COLS=$( stty -a | grep -Eo '; columns [0-9]+;' | cut -d';' -f2 | cut -d' ' -f3 )
# Override TTY cols to make comparing ansible-playbook output easier
# This value matches the default in the code when there is no TTY
stty cols 79
fi
}
BASEFILE=callback_default.out
ORIGFILE="${BASEFILE}"
OUTFILE="${BASEFILE}.new"
trap 'cleanup' EXIT
# The --init flag will (re)generate the "good" output files used by the tests
INIT=0
if [[ ${1:-} == "--init" ]]; then
shift
OUTFILE=$ORIGFILE
INIT=1
fi
adjust_tty_cols
# Force the 'default' callback plugin, since that's what we're testing
export ANSIBLE_STDOUT_CALLBACK=default
# Disable color in output for consistency
export ANSIBLE_FORCE_COLOR=0
export ANSIBLE_NOCOLOR=1
# Default settings
export ANSIBLE_DISPLAY_SKIPPED_HOSTS=1
export ANSIBLE_DISPLAY_OK_HOSTS=1
export ANSIBLE_DISPLAY_FAILED_STDERR=0
export ANSIBLE_CHECK_MODE_MARKERS=0
run_test default test.yml
# Check for async output
# NOTE: regex to match 1 or more digits works for both BSD and GNU grep
ansible-playbook -i inventory test_async.yml 2>&1 | tee async_test.out
grep "ASYNC OK .* jid=[0-9]\{1,\}" async_test.out
grep "ASYNC FAILED .* jid=[0-9]\{1,\}" async_test.out
rm -f async_test.out
# Hide skipped
export ANSIBLE_DISPLAY_SKIPPED_HOSTS=0
run_test hide_skipped test.yml
# Hide skipped/ok
export ANSIBLE_DISPLAY_SKIPPED_HOSTS=0
export ANSIBLE_DISPLAY_OK_HOSTS=0
run_test hide_skipped_ok test.yml
# Hide ok
export ANSIBLE_DISPLAY_SKIPPED_HOSTS=1
export ANSIBLE_DISPLAY_OK_HOSTS=0
run_test hide_ok test.yml
# Failed to stderr
export ANSIBLE_DISPLAY_SKIPPED_HOSTS=1
export ANSIBLE_DISPLAY_OK_HOSTS=1
export ANSIBLE_DISPLAY_FAILED_STDERR=1
run_test failed_to_stderr test.yml
export ANSIBLE_DISPLAY_FAILED_STDERR=0
# Test displaying task path on failure
export ANSIBLE_SHOW_TASK_PATH_ON_FAILURE=1
run_test display_path_on_failure test.yml
export ANSIBLE_SHOW_TASK_PATH_ON_FAILURE=0
# Default settings with unreachable tasks
export ANSIBLE_DISPLAY_SKIPPED_HOSTS=1
export ANSIBLE_DISPLAY_OK_HOSTS=1
export ANSIBLE_DISPLAY_FAILED_STDERR=1
export ANSIBLE_TIMEOUT=1
# Check if UNREACHABLE is present in stderr
set +e
ansible-playbook -i inventory test_2.yml > >(set +x; tee "${BASEFILE}.unreachable.stdout";) 2> >(set +x; tee "${BASEFILE}.unreachable.stderr" >&2) || true
set -e
if test "$(grep -c 'UNREACHABLE' "${BASEFILE}.unreachable.stderr")" -ne 1; then
echo "Test failed"
exit 1
fi
export ANSIBLE_DISPLAY_FAILED_STDERR=0
export ANSIBLE_CALLBACK_RESULT_FORMAT=yaml
run_test result_format_yaml test.yml
export ANSIBLE_CALLBACK_RESULT_FORMAT=json
export ANSIBLE_CALLBACK_RESULT_FORMAT=yaml
export ANSIBLE_CALLBACK_FORMAT_PRETTY=1
run_test result_format_yaml_lossy_verbose test.yml -v
run_test yaml_result_format_yaml_verbose test_yaml.yml -v
export ANSIBLE_CALLBACK_RESULT_FORMAT=json
unset ANSIBLE_CALLBACK_FORMAT_PRETTY
export ANSIBLE_CALLBACK_RESULT_FORMAT=yaml
export ANSIBLE_CALLBACK_FORMAT_PRETTY=0
run_test result_format_yaml_verbose test.yml -v
export ANSIBLE_CALLBACK_RESULT_FORMAT=json
unset ANSIBLE_CALLBACK_FORMAT_PRETTY
## DRY RUN tests
#
# Default settings with dry run tasks
export ANSIBLE_DISPLAY_SKIPPED_HOSTS=1
export ANSIBLE_DISPLAY_OK_HOSTS=1
export ANSIBLE_DISPLAY_FAILED_STDERR=1
# Enable Check mode markers
export ANSIBLE_CHECK_MODE_MARKERS=1
# Test the wet run with check markers
run_test_dryrun check_markers_wet
# Test the dry run with check markers
run_test_dryrun check_markers_dry --check
# Disable Check mode markers
export ANSIBLE_CHECK_MODE_MARKERS=0
# Test the wet run without check markers
run_test_dryrun check_nomarkers_wet
# Test the dry run without check markers
run_test_dryrun check_nomarkers_dry --check
# Make sure implicit meta tasks are not printed
ansible-playbook -i host1,host2 no_implicit_meta_banners.yml > meta_test.out
cat meta_test.out
[ "$(grep -c 'TASK \[meta\]' meta_test.out)" -eq 0 ]
rm -f meta_test.out
# Ensure free/host_pinned non-lockstep strategies display correctly
diff -u callback_default.out.free.stdout <(ANSIBLE_STRATEGY=free ansible-playbook -i inventory test_non_lockstep.yml 2>/dev/null)
diff -u callback_default.out.host_pinned.stdout <(ANSIBLE_STRATEGY=host_pinned ansible-playbook -i inventory test_non_lockstep.yml 2>/dev/null)
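# A hedged sketch of the matching FQCN check implied by the fix for #76782:
# that PR adds callback_default.out.fqcn_free.stdout as an expected-output
# file, so a companion comparison presumably resembles the line below (the
# exact playbook name and invocation are assumptions):
# diff -u callback_default.out.fqcn_free.stdout <(ANSIBLE_STRATEGY=ansible.builtin.free ansible-playbook -i inventory test_non_lockstep.yml 2>/dev/null)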
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,769 |
Parameterize disable_lookups in Constructable
|
### Summary
Currently, `disable_lookups=True` is hard-coded; it would be nice if this could be overridden.
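A minimal sketch of what parameterizing this could look like (hypothetical shape; the issue title places the hard-coded call in the `Constructable` helper of `lib/ansible/plugins/inventory/__init__.py`, and `_compose` is assumed to be the method in question):

```python
def _compose(self, template, variables, disable_lookups=True):
    # previously the templating call below always forced disable_lookups=True;
    # accepting it as a keyword argument lets callers opt back in
    t = self.templar
    t.available_variables = variables
    return t.template(template, disable_lookups=disable_lookups)
```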
### Issue Type
Bug Report
### Component Name
inventory
### Ansible Version
```console
$ ansible --version
ansible [core 2.12.1]
config file = /Users/mark/.ansible.cfg
configured module search path = ['/Users/mark/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/mark/.ansible/collections/ansible_collections/community/digitalocean/venv/lib/python3.9/site-packages/ansible
ansible collection location = /Users/mark/.ansible/collections:/usr/share/ansible/collections
executable location = /Users/mark/.ansible/collections/ansible_collections/community/digitalocean/venv/bin/ansible
python version = 3.9.9 (main, Dec 9 2021, 19:41:06) [Clang 13.0.0 (clang-1300.0.29.3)]
jinja version = 3.0.3
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
DEFAULT_STDOUT_CALLBACK(/Users/mark/.ansible.cfg) = yaml
DIFF_ALWAYS(/Users/mark/.ansible.cfg) = True
INTERPRETER_PYTHON(/Users/mark/.ansible.cfg) = /Users/mark/.pyenv/shims/python3
```
### OS / Environment
MacOS 12.0.1
### Steps to Reproduce
There's nothing to reproduce; it's an observation.
### Expected Results
N/A
### Actual Results
```console
N/A
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76769
|
https://github.com/ansible/ansible/pull/76770
|
553e75df95767d0eba8d906c6fac034a0fc0ab1c
|
36519ff95eddea72d7c16f9a7c3e8501ee0ab60e
| 2022-01-15T16:02:41Z |
python
| 2022-01-19T15:49:50Z |
changelogs/fragments/76769-inventory-constructable-disable-lookups.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,769 |
Parameterize disable_lookups in Constructable
|
### Summary
Currently, `disable_lookups=True` is hard-coded; it would be nice if this could be overridden.
### Issue Type
Bug Report
### Component Name
inventory
### Ansible Version
```console
$ ansible --version
ansible [core 2.12.1]
config file = /Users/mark/.ansible.cfg
configured module search path = ['/Users/mark/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/mark/.ansible/collections/ansible_collections/community/digitalocean/venv/lib/python3.9/site-packages/ansible
ansible collection location = /Users/mark/.ansible/collections:/usr/share/ansible/collections
executable location = /Users/mark/.ansible/collections/ansible_collections/community/digitalocean/venv/bin/ansible
python version = 3.9.9 (main, Dec 9 2021, 19:41:06) [Clang 13.0.0 (clang-1300.0.29.3)]
jinja version = 3.0.3
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
DEFAULT_STDOUT_CALLBACK(/Users/mark/.ansible.cfg) = yaml
DIFF_ALWAYS(/Users/mark/.ansible.cfg) = True
INTERPRETER_PYTHON(/Users/mark/.ansible.cfg) = /Users/mark/.pyenv/shims/python3
```
### OS / Environment
MacOS 12.0.1
### Steps to Reproduce
There's nothing to reproduce; it's an observation.
### Expected Results
N/A
### Actual Results
```console
N/A
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76769
|
https://github.com/ansible/ansible/pull/76770
|
553e75df95767d0eba8d906c6fac034a0fc0ab1c
|
36519ff95eddea72d7c16f9a7c3e8501ee0ab60e
| 2022-01-15T16:02:41Z |
python
| 2022-01-19T15:49:50Z |
lib/ansible/plugins/inventory/__init__.py
|
# (c) 2017, Red Hat, inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import hashlib
import os
import string
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.inventory.group import to_safe_group_name as original_safe
from ansible.parsing.utils.addresses import parse_address
from ansible.plugins import AnsiblePlugin
from ansible.plugins.cache import CachePluginAdjudicator as CacheObject
from ansible.module_utils._text import to_bytes, to_native
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.module_utils.six import string_types
from ansible.template import Templar
from ansible.utils.display import Display
from ansible.utils.vars import combine_vars, load_extra_vars
display = Display()
# Helper methods
def to_safe_group_name(name):
# placeholder for backwards compat
return original_safe(name, force=True, silent=True)
def detect_range(line=None):
'''
A helper function that checks a given host line to see if it contains
a range pattern described in the docstring above.
Returns True if the given line contains a pattern, else False.
'''
return '[' in line
def expand_hostname_range(line=None):
'''
A helper function that expands a given line that contains a pattern
    specified in the top docstring, and returns a list consisting of the
expanded version.
The '[' and ']' characters are used to maintain the pseudo-code
appearance. They are replaced in this function with '|' to ease
string splitting.
References: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html#hosts-and-groups
'''
all_hosts = []
if line:
        # A hostname such as db[1:6]-node is considered to consist of
# three parts:
# head: 'db'
# nrange: [1:6]; range() is a built-in. Can't use the name
# tail: '-node'
# Add support for multiple ranges in a host so:
# db[01:10:3]node-[01:10]
# - to do this we split off at the first [...] set, getting the list
# of hosts and then repeat until none left.
# - also add an optional third parameter which contains the step. (Default: 1)
# so range can be [01:10:2] -> 01 03 05 07 09
(head, nrange, tail) = line.replace('[', '|', 1).replace(']', '|', 1).split('|')
bounds = nrange.split(":")
if len(bounds) != 2 and len(bounds) != 3:
raise AnsibleError("host range must be begin:end or begin:end:step")
beg = bounds[0]
end = bounds[1]
if len(bounds) == 2:
step = 1
else:
step = bounds[2]
if not beg:
beg = "0"
if not end:
raise AnsibleError("host range must specify end value")
if beg[0] == '0' and len(beg) > 1:
rlen = len(beg) # range length formatting hint
if rlen != len(end):
raise AnsibleError("host range must specify equal-length begin and end formats")
def fill(x):
return str(x).zfill(rlen) # range sequence
else:
fill = str
try:
i_beg = string.ascii_letters.index(beg)
i_end = string.ascii_letters.index(end)
if i_beg > i_end:
raise AnsibleError("host range must have begin <= end")
seq = list(string.ascii_letters[i_beg:i_end + 1:int(step)])
except ValueError: # not an alpha range
seq = range(int(beg), int(end) + 1, int(step))
for rseq in seq:
hname = ''.join((head, fill(rseq), tail))
if detect_range(hname):
all_hosts.extend(expand_hostname_range(hname))
else:
all_hosts.append(hname)
return all_hosts
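# Illustrative expansions (added examples, not in the original source):
#   expand_hostname_range('db[1:3]-node')  -> ['db1-node', 'db2-node', 'db3-node']
#   expand_hostname_range('db[01:05:2]')   -> ['db01', 'db03', 'db05']
#   expand_hostname_range('host-[a:c]')    -> ['host-a', 'host-b', 'host-c']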
def get_cache_plugin(plugin_name, **kwargs):
try:
cache = CacheObject(plugin_name, **kwargs)
except AnsibleError as e:
if 'fact_caching_connection' in to_native(e):
raise AnsibleError("error, '%s' inventory cache plugin requires the one of the following to be set "
"to a writeable directory path:\nansible.cfg:\n[default]: fact_caching_connection,\n"
"[inventory]: cache_connection;\nEnvironment:\nANSIBLE_INVENTORY_CACHE_CONNECTION,\n"
"ANSIBLE_CACHE_PLUGIN_CONNECTION." % plugin_name)
else:
raise e
if plugin_name != 'memory' and kwargs and not getattr(cache._plugin, '_options', None):
raise AnsibleError('Unable to use cache plugin {0} for inventory. Cache options were provided but may not reconcile '
'correctly unless set via set_options. Refer to the porting guide if the plugin derives user settings '
'from ansible.constants.'.format(plugin_name))
return cache
class BaseInventoryPlugin(AnsiblePlugin):
""" Parses an Inventory Source"""
TYPE = 'generator'
# 3rd party plugins redefine this to
# use custom group name sanitization
# since constructed features enforce
# it by default.
_sanitize_group_name = staticmethod(to_safe_group_name)
def __init__(self):
super(BaseInventoryPlugin, self).__init__()
self._options = {}
self.inventory = None
self.display = display
self._vars = {}
def parse(self, inventory, loader, path, cache=True):
''' Populates inventory from the given data. Raises an error on any parse failure
:arg inventory: a copy of the previously accumulated inventory data,
to be updated with any new data this plugin provides.
The inventory can be empty if no other source/plugin ran successfully.
:arg loader: a reference to the DataLoader, which can read in YAML and JSON files,
it also has Vault support to automatically decrypt files.
:arg path: the string that represents the 'inventory source',
normally a path to a configuration file for this inventory,
but it can also be a raw string for this plugin to consume
:arg cache: a boolean that indicates if the plugin should use the cache or not
you can ignore if this plugin does not implement caching.
'''
self.loader = loader
self.inventory = inventory
self.templar = Templar(loader=loader)
self._vars = load_extra_vars(loader)
def verify_file(self, path):
''' Verify if file is usable by this plugin, base does minimal accessibility check
:arg path: a string that was passed as an inventory source,
it normally is a path to a config file, but this is not a requirement,
it can also be parsed itself as the inventory data to process.
So only call this base class if you expect it to be a file.
'''
valid = False
b_path = to_bytes(path, errors='surrogate_or_strict')
if (os.path.exists(b_path) and os.access(b_path, os.R_OK)):
valid = True
else:
self.display.vvv('Skipping due to inventory source not existing or not being readable by the current user')
return valid
def _populate_host_vars(self, hosts, variables, group=None, port=None):
if not isinstance(variables, Mapping):
raise AnsibleParserError("Invalid data from file, expected dictionary and got:\n\n%s" % to_native(variables))
for host in hosts:
self.inventory.add_host(host, group=group, port=port)
for k in variables:
self.inventory.set_variable(host, k, variables[k])
def _read_config_data(self, path):
''' validate config and set options as appropriate
:arg path: path to common yaml format config file for this plugin
'''
config = {}
try:
# avoid loader cache so meta: refresh_inventory can pick up config changes
# if we read more than once, fs cache should be good enough
config = self.loader.load_from_file(path, cache=False)
except Exception as e:
raise AnsibleParserError(to_native(e))
# a plugin can be loaded via many different names with redirection- if so, we want to accept any of those names
valid_names = getattr(self, '_redirected_names') or [self.NAME]
if not config:
# no data
raise AnsibleParserError("%s is empty" % (to_native(path)))
elif config.get('plugin') not in valid_names:
# this is not my config file
raise AnsibleParserError("Incorrect plugin name in file: %s" % config.get('plugin', 'none found'))
elif not isinstance(config, Mapping):
# configs are dictionaries
raise AnsibleParserError('inventory source has invalid structure, it should be a dictionary, got: %s' % type(config))
self.set_options(direct=config, var_options=self._vars)
if 'cache' in self._options and self.get_option('cache'):
cache_option_keys = [('_uri', 'cache_connection'), ('_timeout', 'cache_timeout'), ('_prefix', 'cache_prefix')]
cache_options = dict((opt[0], self.get_option(opt[1])) for opt in cache_option_keys if self.get_option(opt[1]) is not None)
self._cache = get_cache_plugin(self.get_option('cache_plugin'), **cache_options)
return config
def _consume_options(self, data):
        ''' update existing options from alternate configuration sources not normally used by Ansible.
        Many API libraries already have their own configuration sources; this allows plugin authors to leverage them.
:arg data: key/value pairs that correspond to configuration options for this plugin
'''
for k in self._options:
if k in data:
self._options[k] = data.pop(k)
def _expand_hostpattern(self, hostpattern):
'''
Takes a single host pattern and returns a list of hostnames and an
optional port number that applies to all of them.
'''
# Can the given hostpattern be parsed as a host with an optional port
# specification?
try:
(pattern, port) = parse_address(hostpattern, allow_ranges=True)
except Exception:
# not a recognizable host pattern
pattern = hostpattern
port = None
# Once we have separated the pattern, we expand it into list of one or
# more hostnames, depending on whether it contains any [x:y] ranges.
if detect_range(pattern):
hostnames = expand_hostname_range(pattern)
else:
hostnames = [pattern]
return (hostnames, port)
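    # Illustrative behaviour of the method above (added examples):
    #   self._expand_hostpattern('www[01:02].example.com:2222')
    #     -> (['www01.example.com', 'www02.example.com'], 2222)
    #   self._expand_hostpattern('db1')
    #     -> (['db1'], None)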
class BaseFileInventoryPlugin(BaseInventoryPlugin):
""" Parses a File based Inventory Source"""
TYPE = 'storage'
def __init__(self):
super(BaseFileInventoryPlugin, self).__init__()
class Cacheable(object):
_cache = CacheObject()
@property
def cache(self):
return self._cache
def load_cache_plugin(self):
plugin_name = self.get_option('cache_plugin')
cache_option_keys = [('_uri', 'cache_connection'), ('_timeout', 'cache_timeout'), ('_prefix', 'cache_prefix')]
cache_options = dict((opt[0], self.get_option(opt[1])) for opt in cache_option_keys if self.get_option(opt[1]) is not None)
self._cache = get_cache_plugin(plugin_name, **cache_options)
def get_cache_key(self, path):
return "{0}_{1}".format(self.NAME, self._get_cache_prefix(path))
def _get_cache_prefix(self, path):
''' create predictable unique prefix for plugin/inventory '''
m = hashlib.sha1()
m.update(to_bytes(self.NAME, errors='surrogate_or_strict'))
d1 = m.hexdigest()
n = hashlib.sha1()
n.update(to_bytes(path, errors='surrogate_or_strict'))
d2 = n.hexdigest()
return 's_'.join([d1[:5], d2[:5]])
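    # The resulting prefix looks like 'f9a2bs_0c1d4': two truncated SHA-1 hex
    # digests joined by 's_' (digest values here are illustrative, not computed).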
def clear_cache(self):
self._cache.flush()
def update_cache_if_changed(self):
self._cache.update_cache_if_changed()
def set_cache_plugin(self):
self._cache.set_cache()
class Constructable(object):
def _compose(self, template, variables):
''' helper method for plugins to compose variables for Ansible based on jinja2 expression and inventory vars'''
t = self.templar
try:
use_extra = self.get_option('use_extra_vars')
except Exception:
use_extra = False
if use_extra:
t.available_variables = combine_vars(variables, self._vars)
else:
t.available_variables = variables
return t.template('%s%s%s' % (t.environment.variable_start_string, template, t.environment.variable_end_string), disable_lookups=True)
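    # Illustrative call (added example): with variables={'region': 'us', 'zone': 1},
    # self._compose("region ~ '-' ~ zone", variables) renders the Jinja2
    # expression and returns 'us-1'.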
def _set_composite_vars(self, compose, variables, host, strict=False):
''' loops over compose entries to create vars for hosts '''
if compose and isinstance(compose, dict):
for varname in compose:
try:
composite = self._compose(compose[varname], variables)
except Exception as e:
if strict:
raise AnsibleError("Could not set %s for host %s: %s" % (varname, host, to_native(e)))
continue
self.inventory.set_variable(host, varname, composite)
def _add_host_to_composed_groups(self, groups, variables, host, strict=False, fetch_hostvars=True):
''' helper to create complex groups for plugins based on jinja2 conditionals, hosts that meet the conditional are added to group'''
# process each 'group entry'
if groups and isinstance(groups, dict):
if fetch_hostvars:
variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
self.templar.available_variables = variables
for group_name in groups:
conditional = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % groups[group_name]
group_name = self._sanitize_group_name(group_name)
try:
result = boolean(self.templar.template(conditional))
except Exception as e:
if strict:
raise AnsibleParserError("Could not add host %s to group %s: %s" % (host, group_name, to_native(e)))
continue
if result:
# ensure group exists, use sanitized name
group_name = self.inventory.add_group(group_name)
# add host to group
self.inventory.add_child(group_name, host)
def _add_host_to_keyed_groups(self, keys, variables, host, strict=False, fetch_hostvars=True):
''' helper to create groups for plugins based on variable values and add the corresponding hosts to it'''
if keys and isinstance(keys, list):
for keyed in keys:
if keyed and isinstance(keyed, dict):
if fetch_hostvars:
variables = combine_vars(variables, self.inventory.get_host(host).get_vars())
try:
key = self._compose(keyed.get('key'), variables)
except Exception as e:
if strict:
raise AnsibleParserError("Could not generate group for host %s from %s entry: %s" % (host, keyed.get('key'), to_native(e)))
continue
default_value_name = keyed.get('default_value', None)
trailing_separator = keyed.get('trailing_separator')
if trailing_separator is not None and default_value_name is not None:
raise AnsibleParserError("parameters are mutually exclusive for keyed groups: default_value|trailing_separator")
if key or (key == '' and default_value_name is not None):
prefix = keyed.get('prefix', '')
sep = keyed.get('separator', '_')
raw_parent_name = keyed.get('parent_group', None)
if raw_parent_name:
try:
raw_parent_name = self.templar.template(raw_parent_name)
except AnsibleError as e:
if strict:
raise AnsibleParserError("Could not generate parent group %s for group %s: %s" % (raw_parent_name, key, to_native(e)))
continue
new_raw_group_names = []
if isinstance(key, string_types):
# if key is empty, 'default_value' will be used as group name
if key == '' and default_value_name is not None:
new_raw_group_names.append(default_value_name)
else:
new_raw_group_names.append(key)
elif isinstance(key, list):
for name in key:
# if list item is empty, 'default_value' will be used as group name
if name == '' and default_value_name is not None:
new_raw_group_names.append(default_value_name)
else:
new_raw_group_names.append(name)
elif isinstance(key, Mapping):
for (gname, gval) in key.items():
bare_name = '%s%s%s' % (gname, sep, gval)
if gval == '':
# key's value is empty
if default_value_name is not None:
bare_name = '%s%s%s' % (gname, sep, default_value_name)
elif trailing_separator is False:
bare_name = gname
new_raw_group_names.append(bare_name)
else:
raise AnsibleParserError("Invalid group name format, expected a string or a list of them or dictionary, got: %s" % type(key))
for bare_name in new_raw_group_names:
if prefix == '' and self.get_option('leading_separator') is False:
sep = ''
gname = self._sanitize_group_name('%s%s%s' % (prefix, sep, bare_name))
result_gname = self.inventory.add_group(gname)
self.inventory.add_host(host, result_gname)
if raw_parent_name:
parent_name = self._sanitize_group_name(raw_parent_name)
self.inventory.add_group(parent_name)
self.inventory.add_child(parent_name, result_gname)
else:
# exclude case of empty list and dictionary, because these are valid constructions
# simply no groups need to be constructed, but are still falsy
if strict and key not in ([], {}):
raise AnsibleParserError("No key or key resulted empty for %s in host %s, invalid entry" % (keyed.get('key'), host))
else:
raise AnsibleParserError("Invalid keyed group entry, it must be a dictionary: %s " % keyed)
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,792 |
Hostname module should support CBL Mariner Linux
|
### Summary
Attempting to use the `hostname` module on a host running [CBL-Mariner](https://github.com/microsoft/CBL-Mariner) fails with the following error:
`hostname module cannot be used on platform Linux (Mariner)`
It looks like a method specifically for this OS is needed in modules/system/hostname.py.
Similar bugs in the past: #42726 and #58444
A PR to fix similar issues in the past is #56936
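One possible minimal fix, sketched below, would add a platform subclass like the ones hostname.py already defines for other distributions (this assumes Mariner should use the systemd strategy, which fits its Fedora lineage; the actual fix may take a different shape):
```python
class MarinerHostname(Hostname):
    platform = 'Linux'
    distribution = 'Mariner'
    strategy_class = SystemdStrategy
```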
### Issue Type
Bug Report
### Component Name
hostname
### Ansible Version
```console
$ ansible --version
ansible 2.9.23
config file = None
configured module search path = ['/home/fedora/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.7/site-packages/ansible
executable location = /usr/bin/ansible
python version = 3.7.10 (default, Sep 28 2021, 20:29:11) [GCC 9.1.0]
```
### Configuration
```console
$ ansible-config dump --only-changed
```
### OS / Environment
OS is [CBL-Mariner](https://github.com/microsoft/CBL-Mariner)
/etc/os-release
```
NAME="Common Base Linux Mariner"
VERSION="1.0.20211230"
ID=mariner
VERSION_ID=1.0
PRETTY_NAME="CBL-Mariner/Linux"
ANSI_COLOR="1;34"
HOME_URL="https://aka.ms/cbl-mariner"
BUG_REPORT_URL="https://aka.ms/cbl-mariner"
SUPPORT_URL="https://aka.ms/cbl-mariner"
```
/etc/lsb-release
```
DISTRIB_ID="Mariner"
DISTRIB_RELEASE="1.0.20211230"
DISTRIB_CODENAME=Mariner
DISTRIB_DESCRIPTION="CBL-Mariner 1.0.20211230"
```
```
/etc/mariner-release
CBL-Mariner 1.0.20211230
MARINER_BUILD_NUMBER=f33d439
```
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
- name: "ensure hostname is correct"
hostname: name={{ inventory_hostname }}
```
### Expected Results
I expected `hostname` to "just work" given CBL-Mariner is derived from Fedora Server 35.
### Actual Results
```console
Error received: hostname module cannot be used on platform Linux (Mariner)
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76792
|
https://github.com/ansible/ansible/pull/76800
|
36519ff95eddea72d7c16f9a7c3e8501ee0ab60e
|
374720fc97f54c34281a2c78e3ecb12eca45c46c
| 2022-01-18T20:30:39Z |
python
| 2022-01-20T19:25:52Z |
changelogs/fragments/76792-hostname-all-systemd.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,792 |
Hostname module should support CBL Mariner Linux
|
### Summary
Attempting to use the `hostname` module on a host running [CBL-Mariner](https://github.com/microsoft/CBL-Mariner) fails with the following error:
`hostname module cannot be used on platform Linux (Mariner)`
It looks like a method specifically for this OS is needed in modules/system/hostname.py.
Similar bugs in the past: #42726 and #58444
A PR to fix similar issues in the past is #56936
### Issue Type
Bug Report
### Component Name
hostname
### Ansible Version
```console
$ ansible --version
ansible 2.9.23
config file = None
configured module search path = ['/home/fedora/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.7/site-packages/ansible
executable location = /usr/bin/ansible
python version = 3.7.10 (default, Sep 28 2021, 20:29:11) [GCC 9.1.0]
```
### Configuration
```console
$ ansible-config dump --only-changed
```
### OS / Environment
OS is [CBL-Mariner](https://github.com/microsoft/CBL-Mariner)
/etc/os-release
```
NAME="Common Base Linux Mariner"
VERSION="1.0.20211230"
ID=mariner
VERSION_ID=1.0
PRETTY_NAME="CBL-Mariner/Linux"
ANSI_COLOR="1;34"
HOME_URL="https://aka.ms/cbl-mariner"
BUG_REPORT_URL="https://aka.ms/cbl-mariner"
SUPPORT_URL="https://aka.ms/cbl-mariner"
```
/etc/lsb-release
```
DISTRIB_ID="Mariner"
DISTRIB_RELEASE="1.0.20211230"
DISTRIB_CODENAME=Mariner
DISTRIB_DESCRIPTION="CBL-Mariner 1.0.20211230"
```
```
/etc/mariner-release
CBL-Mariner 1.0.20211230
MARINER_BUILD_NUMBER=f33d439
```
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
- name: "ensure hostname is correct"
hostname: name={{ inventory_hostname }}
```
### Expected Results
I expected `hostname` to "just work" given CBL-Mariner is derived from Fedora Server 35.
### Actual Results
```console
Error received: hostname module cannot be used on platform Linux (Mariner)
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76792
|
https://github.com/ansible/ansible/pull/76800
|
36519ff95eddea72d7c16f9a7c3e8501ee0ab60e
|
374720fc97f54c34281a2c78e3ecb12eca45c46c
| 2022-01-18T20:30:39Z |
python
| 2022-01-20T19:25:52Z |
lib/ansible/modules/hostname.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Hiroaki Nakamura <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: hostname
author:
- Adrian Likins (@alikins)
- Hideki Saito (@saito-hideki)
version_added: "1.4"
short_description: Manage hostname
requirements: [ hostname ]
description:
- Set system's hostname. Supports most OSs/Distributions including those using C(systemd).
- Windows, HP-UX, and AIX are not currently supported.
notes:
- This module does B(NOT) modify C(/etc/hosts). You need to modify it yourself using other modules such as M(ansible.builtin.template)
or M(ansible.builtin.replace).
- On macOS, this module uses C(scutil) to set C(HostName), C(ComputerName), and C(LocalHostName). Since C(LocalHostName)
cannot contain spaces or most special characters, this module will replace characters when setting C(LocalHostName).
options:
name:
description:
- Name of the host.
- If the value is a fully qualified domain name that does not resolve from the given host,
              this will cause the module to hang for a few seconds while waiting for the name resolution attempt to time out.
type: str
required: true
use:
description:
- Which strategy to use to update the hostname.
- If not set we try to autodetect, but this can be problematic, particularly with containers as they can present misleading information.
- Note that 'systemd' should be specified for RHEL/EL/CentOS 7+. Older distributions should use 'redhat'.
choices: ['alpine', 'debian', 'freebsd', 'generic', 'macos', 'macosx', 'darwin', 'openbsd', 'openrc', 'redhat', 'sles', 'solaris', 'systemd']
type: str
version_added: '2.9'
extends_documentation_fragment:
- action_common_attributes
- action_common_attributes.facts
attributes:
check_mode:
support: full
diff_mode:
support: full
facts:
support: full
platform:
platforms: posix
'''
EXAMPLES = '''
- name: Set a hostname
ansible.builtin.hostname:
name: web01
- name: Set a hostname specifying strategy
ansible.builtin.hostname:
name: web01
use: systemd
'''
import os
import platform
import socket
import traceback
from ansible.module_utils.basic import (
AnsibleModule,
get_distribution,
get_distribution_version,
)
from ansible.module_utils.common.sys_info import get_platform_subclass
from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
from ansible.module_utils.facts.utils import get_file_lines, get_file_content
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.six import PY3, text_type
STRATS = {
'alpine': 'Alpine',
'debian': 'Debian',
'freebsd': 'FreeBSD',
'generic': 'Generic',
'macos': 'Darwin',
'macosx': 'Darwin',
'darwin': 'Darwin',
'openbsd': 'OpenBSD',
'openrc': 'OpenRC',
'redhat': 'RedHat',
'sles': 'SLES',
'solaris': 'Solaris',
'systemd': 'Systemd',
}
class UnimplementedStrategy(object):
def __init__(self, module):
self.module = module
def update_current_and_permanent_hostname(self):
self.unimplemented_error()
def update_current_hostname(self):
self.unimplemented_error()
def update_permanent_hostname(self):
self.unimplemented_error()
def get_current_hostname(self):
self.unimplemented_error()
def set_current_hostname(self, name):
self.unimplemented_error()
def get_permanent_hostname(self):
self.unimplemented_error()
def set_permanent_hostname(self, name):
self.unimplemented_error()
def unimplemented_error(self):
system = platform.system()
distribution = get_distribution()
if distribution is not None:
msg_platform = '%s (%s)' % (system, distribution)
else:
msg_platform = system
self.module.fail_json(
msg='hostname module cannot be used on platform %s' % msg_platform)
class Hostname(object):
"""
This is a generic Hostname manipulation class that is subclassed
based on platform.
    A subclass may wish to assign a different strategy instance to self.strategy.
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
strategy_class = UnimplementedStrategy
def __new__(cls, *args, **kwargs):
new_cls = get_platform_subclass(Hostname)
return super(cls, new_cls).__new__(new_cls)
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.use = module.params['use']
if self.use is not None:
strat = globals()['%sStrategy' % STRATS[self.use]]
self.strategy = strat(module)
elif self.platform == 'Linux' and ServiceMgrFactCollector.is_systemd_managed(module):
self.strategy = SystemdStrategy(module)
else:
self.strategy = self.strategy_class(module)
def update_current_and_permanent_hostname(self):
return self.strategy.update_current_and_permanent_hostname()
def get_current_hostname(self):
return self.strategy.get_current_hostname()
def set_current_hostname(self, name):
self.strategy.set_current_hostname(name)
def get_permanent_hostname(self):
return self.strategy.get_permanent_hostname()
def set_permanent_hostname(self, name):
self.strategy.set_permanent_hostname(name)
class BaseStrategy(object):
def __init__(self, module):
self.module = module
self.changed = False
def update_current_and_permanent_hostname(self):
self.update_current_hostname()
self.update_permanent_hostname()
return self.changed
def update_current_hostname(self):
name = self.module.params['name']
current_name = self.get_current_hostname()
if current_name != name:
if not self.module.check_mode:
self.set_current_hostname(name)
self.changed = True
def update_permanent_hostname(self):
name = self.module.params['name']
permanent_name = self.get_permanent_hostname()
if permanent_name != name:
if not self.module.check_mode:
self.set_permanent_hostname(name)
self.changed = True
def get_current_hostname(self):
return self.get_permanent_hostname()
def set_current_hostname(self, name):
pass
def get_permanent_hostname(self):
raise NotImplementedError
def set_permanent_hostname(self, name):
raise NotImplementedError
class CommandStrategy(BaseStrategy):
COMMAND = 'hostname'
def __init__(self, module):
super(CommandStrategy, self).__init__(module)
self.hostname_cmd = self.module.get_bin_path(self.COMMAND, True)
def get_current_hostname(self):
cmd = [self.hostname_cmd]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
def get_permanent_hostname(self):
return 'UNKNOWN'
def set_permanent_hostname(self, name):
pass
class FileStrategy(BaseStrategy):
FILE = '/etc/hostname'
def get_permanent_hostname(self):
if not os.path.isfile(self.FILE):
return ''
try:
return get_file_lines(self.FILE)
except Exception as e:
self.module.fail_json(
msg="failed to read hostname: %s" % to_native(e),
exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
with open(self.FILE, 'w+') as f:
f.write("%s\n" % name)
except Exception as e:
self.module.fail_json(
msg="failed to update hostname: %s" % to_native(e),
exception=traceback.format_exc())
class SLESStrategy(FileStrategy):
"""
This is a SLES Hostname strategy class - it edits the
/etc/HOSTNAME file.
"""
FILE = '/etc/HOSTNAME'
class RedHatStrategy(BaseStrategy):
"""
This is a Redhat Hostname strategy class - it edits the
/etc/sysconfig/network file.
"""
NETWORK_FILE = '/etc/sysconfig/network'
def get_permanent_hostname(self):
try:
for line in get_file_lines(self.NETWORK_FILE):
line = to_native(line).strip()
if line.startswith('HOSTNAME'):
k, v = line.split('=')
return v.strip()
self.module.fail_json(
"Unable to locate HOSTNAME entry in %s" % self.NETWORK_FILE
)
except Exception as e:
self.module.fail_json(
msg="failed to read hostname: %s" % to_native(e),
exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
lines = []
found = False
content = get_file_content(self.NETWORK_FILE, strip=False) or ""
for line in content.splitlines(True):
line = to_native(line)
if line.strip().startswith('HOSTNAME'):
lines.append("HOSTNAME=%s\n" % name)
found = True
else:
lines.append(line)
if not found:
lines.append("HOSTNAME=%s\n" % name)
with open(self.NETWORK_FILE, 'w+') as f:
f.writelines(lines)
except Exception as e:
self.module.fail_json(
msg="failed to update hostname: %s" % to_native(e),
exception=traceback.format_exc())
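# Example /etc/sysconfig/network content that RedHatStrategy reads and rewrites
# (illustrative, not from the original source):
#   NETWORKING=yes
#   HOSTNAME=web01.example.com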
class AlpineStrategy(FileStrategy):
"""
    This is an Alpine Linux Hostname manipulation strategy class - it edits
    the /etc/hostname file, then runs hostname -F /etc/hostname.
"""
FILE = '/etc/hostname'
COMMAND = 'hostname'
def set_current_hostname(self, name):
super(AlpineStrategy, self).set_current_hostname(name)
hostname_cmd = self.module.get_bin_path(self.COMMAND, True)
cmd = [hostname_cmd, '-F', self.FILE]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
class SystemdStrategy(BaseStrategy):
"""
This is a Systemd hostname manipulation strategy class - it uses
the hostnamectl command.
"""
COMMAND = "hostnamectl"
def __init__(self, module):
super(SystemdStrategy, self).__init__(module)
self.hostnamectl_cmd = self.module.get_bin_path(self.COMMAND, True)
def get_current_hostname(self):
cmd = [self.hostnamectl_cmd, '--transient', 'status']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
if len(name) > 64:
self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
cmd = [self.hostnamectl_cmd, '--transient', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
def get_permanent_hostname(self):
cmd = [self.hostnamectl_cmd, '--static', 'status']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_permanent_hostname(self, name):
if len(name) > 64:
self.module.fail_json(msg="name cannot be longer than 64 characters on systemd servers, try a shorter name")
cmd = [self.hostnamectl_cmd, '--pretty', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
cmd = [self.hostnamectl_cmd, '--static', 'set-hostname', name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
class OpenRCStrategy(BaseStrategy):
"""
This is a Gentoo (OpenRC) Hostname manipulation strategy class - it edits
the /etc/conf.d/hostname file.
"""
FILE = '/etc/conf.d/hostname'
def get_permanent_hostname(self):
if not os.path.isfile(self.FILE):
return ''
try:
for line in get_file_lines(self.FILE):
line = line.strip()
if line.startswith('hostname='):
return line[10:].strip('"')
except Exception as e:
self.module.fail_json(
msg="failed to read hostname: %s" % to_native(e),
exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
lines = [x.strip() for x in get_file_lines(self.FILE)]
for i, line in enumerate(lines):
if line.startswith('hostname='):
lines[i] = 'hostname="%s"' % name
break
with open(self.FILE, 'w') as f:
f.write('\n'.join(lines) + '\n')
except Exception as e:
self.module.fail_json(
msg="failed to update hostname: %s" % to_native(e),
exception=traceback.format_exc())
class OpenBSDStrategy(FileStrategy):
"""
    This is an OpenBSD family Hostname manipulation strategy class - it edits
the /etc/myname file.
"""
FILE = '/etc/myname'
class SolarisStrategy(BaseStrategy):
"""
    This is a Solaris 11 or later Hostname manipulation strategy class - it
    executes the hostname command.
"""
COMMAND = "hostname"
def __init__(self, module):
super(SolarisStrategy, self).__init__(module)
self.hostname_cmd = self.module.get_bin_path(self.COMMAND, True)
def set_current_hostname(self, name):
cmd_option = '-t'
cmd = [self.hostname_cmd, cmd_option, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
def get_permanent_hostname(self):
fmri = 'svc:/system/identity:node'
pattern = 'config/nodename'
cmd = '/usr/sbin/svccfg -s %s listprop -o value %s' % (fmri, pattern)
rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_permanent_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
class FreeBSDStrategy(BaseStrategy):
"""
This is a FreeBSD hostname manipulation strategy class - it edits
the /etc/rc.conf.d/hostname file.
"""
FILE = '/etc/rc.conf.d/hostname'
COMMAND = "hostname"
def __init__(self, module):
super(FreeBSDStrategy, self).__init__(module)
self.hostname_cmd = self.module.get_bin_path(self.COMMAND, True)
def get_current_hostname(self):
cmd = [self.hostname_cmd]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_current_hostname(self, name):
cmd = [self.hostname_cmd, name]
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" % (rc, out, err))
def get_permanent_hostname(self):
if not os.path.isfile(self.FILE):
return ''
try:
for line in get_file_lines(self.FILE):
line = line.strip()
if line.startswith('hostname='):
return line[10:].strip('"')
except Exception as e:
self.module.fail_json(
msg="failed to read hostname: %s" % to_native(e),
exception=traceback.format_exc())
def set_permanent_hostname(self, name):
try:
if os.path.isfile(self.FILE):
lines = [x.strip() for x in get_file_lines(self.FILE)]
for i, line in enumerate(lines):
if line.startswith('hostname='):
lines[i] = 'hostname="%s"' % name
break
else:
lines = ['hostname="%s"' % name]
with open(self.FILE, 'w') as f:
f.write('\n'.join(lines) + '\n')
except Exception as e:
self.module.fail_json(
msg="failed to update hostname: %s" % to_native(e),
exception=traceback.format_exc())
class DarwinStrategy(BaseStrategy):
"""
This is a macOS hostname manipulation strategy class. It uses
/usr/sbin/scutil to set ComputerName, HostName, and LocalHostName.
HostName corresponds to what most platforms consider to be hostname.
It controls the name used on the command line and SSH.
However, macOS also has LocalHostName and ComputerName settings.
LocalHostName controls the Bonjour/ZeroConf name, used by services
    like AirDrop. This class implements a method, _scrub_hostname(), that mimics
    the transformations macOS makes on hostnames when entered in the Sharing
    preference pane. It replaces spaces and most special characters with dashes
    and strips periods and apostrophes.
ComputerName is the name used for user-facing GUI services, like the
System Preferences/Sharing pane and when users connect to the Mac over the network.
"""
def __init__(self, module):
super(DarwinStrategy, self).__init__(module)
self.scutil = self.module.get_bin_path('scutil', True)
self.name_types = ('HostName', 'ComputerName', 'LocalHostName')
self.scrubbed_name = self._scrub_hostname(self.module.params['name'])
def _make_translation(self, replace_chars, replacement_chars, delete_chars):
if PY3:
return str.maketrans(replace_chars, replacement_chars, delete_chars)
if not isinstance(replace_chars, text_type) or not isinstance(replacement_chars, text_type):
raise ValueError('replace_chars and replacement_chars must both be strings')
if len(replace_chars) != len(replacement_chars):
raise ValueError('replacement_chars must be the same length as replace_chars')
table = dict(zip((ord(c) for c in replace_chars), replacement_chars))
for char in delete_chars:
table[ord(char)] = None
return table
def _scrub_hostname(self, name):
"""
LocalHostName only accepts valid DNS characters while HostName and ComputerName
accept a much wider range of characters. This function aims to mimic how macOS
translates a friendly name to the LocalHostName.
"""
# Replace all these characters with a single dash
name = to_text(name)
replace_chars = u'\'"~`!@#$%^&*(){}[]/=?+\\|-_ '
delete_chars = u".'"
table = self._make_translation(replace_chars, u'-' * len(replace_chars), delete_chars)
name = name.translate(table)
# Replace multiple dashes with a single dash
while '-' * 2 in name:
            name = name.replace('-' * 2, '-')
name = name.rstrip('-')
return name
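    # Illustrative scrub (added example): "Mike's iMac (2)" becomes
    # 'Mikes-iMac-2' - the apostrophe is deleted, spaces and parentheses
    # become dashes, runs of dashes collapse, and trailing dashes are stripped.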
def get_current_hostname(self):
cmd = [self.scutil, '--get', 'HostName']
rc, out, err = self.module.run_command(cmd)
if rc != 0 and 'HostName: not set' not in err:
self.module.fail_json(msg="Failed to get current hostname rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def get_permanent_hostname(self):
cmd = [self.scutil, '--get', 'ComputerName']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Failed to get permanent hostname rc=%d, out=%s, err=%s" % (rc, out, err))
return to_native(out).strip()
def set_permanent_hostname(self, name):
for hostname_type in self.name_types:
cmd = [self.scutil, '--set', hostname_type]
if hostname_type == 'LocalHostName':
cmd.append(to_native(self.scrubbed_name))
else:
cmd.append(to_native(name))
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Failed to set {3} to '{2}': {0} {1}".format(to_native(out), to_native(err), to_native(name), hostname_type))
def set_current_hostname(self, name):
pass
def update_current_hostname(self):
pass
def update_permanent_hostname(self):
name = self.module.params['name']
# Get all the current host name values in the order of self.name_types
all_names = tuple(self.module.run_command([self.scutil, '--get', name_type])[1].strip() for name_type in self.name_types)
# Get the expected host name values based on the order in self.name_types
expected_names = tuple(self.scrubbed_name if n == 'LocalHostName' else name for n in self.name_types)
# Ensure all three names are updated
if all_names != expected_names:
if not self.module.check_mode:
self.set_permanent_hostname(name)
self.changed = True
class FedoraHostname(Hostname):
platform = 'Linux'
distribution = 'Fedora'
strategy_class = SystemdStrategy
class SLESHostname(Hostname):
platform = 'Linux'
distribution = 'Sles'
try:
distribution_version = get_distribution_version()
        # cast to float may raise ValueError on non-SLES; we use float for a little more safety over int
if distribution_version and 10 <= float(distribution_version) <= 12:
strategy_class = SLESStrategy
else:
raise ValueError()
except ValueError:
strategy_class = UnimplementedStrategy
class OpenSUSEHostname(Hostname):
platform = 'Linux'
distribution = 'Opensuse'
strategy_class = SystemdStrategy
class OpenSUSELeapHostname(Hostname):
platform = 'Linux'
distribution = 'Opensuse-leap'
strategy_class = SystemdStrategy
class OpenSUSETumbleweedHostname(Hostname):
platform = 'Linux'
distribution = 'Opensuse-tumbleweed'
strategy_class = SystemdStrategy
class AsteraHostname(Hostname):
platform = 'Linux'
distribution = '"astralinuxce"'
strategy_class = SystemdStrategy
class ArchHostname(Hostname):
platform = 'Linux'
distribution = 'Arch'
strategy_class = SystemdStrategy
class ArchARMHostname(Hostname):
platform = 'Linux'
distribution = 'Archarm'
strategy_class = SystemdStrategy
class AlmaLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Almalinux'
strategy_class = SystemdStrategy
class ManjaroHostname(Hostname):
platform = 'Linux'
distribution = 'Manjaro'
strategy_class = SystemdStrategy
class ManjaroARMHostname(Hostname):
platform = 'Linux'
distribution = 'Manjaro-arm'
strategy_class = SystemdStrategy
class RHELHostname(Hostname):
platform = 'Linux'
distribution = 'Redhat'
strategy_class = RedHatStrategy
class CentOSHostname(Hostname):
platform = 'Linux'
distribution = 'Centos'
strategy_class = RedHatStrategy
class AnolisOSHostname(Hostname):
platform = 'Linux'
distribution = 'Anolis'
strategy_class = RedHatStrategy
class ClearLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Clear-linux-os'
strategy_class = SystemdStrategy
class CloudlinuxserverHostname(Hostname):
platform = 'Linux'
distribution = 'Cloudlinuxserver'
strategy_class = RedHatStrategy
class CloudlinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Cloudlinux'
strategy_class = RedHatStrategy
class AlinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Alinux'
strategy_class = RedHatStrategy
class CoreosHostname(Hostname):
platform = 'Linux'
distribution = 'Coreos'
strategy_class = SystemdStrategy
class FlatcarHostname(Hostname):
platform = 'Linux'
distribution = 'Flatcar'
strategy_class = SystemdStrategy
class ScientificHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific'
strategy_class = RedHatStrategy
class OracleLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Oracle'
strategy_class = RedHatStrategy
class VirtuozzoLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Virtuozzo'
strategy_class = RedHatStrategy
class AmazonLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Amazon'
strategy_class = RedHatStrategy
class DebianHostname(Hostname):
platform = 'Linux'
distribution = 'Debian'
strategy_class = FileStrategy
class KylinHostname(Hostname):
platform = 'Linux'
distribution = 'Kylin'
strategy_class = FileStrategy
class CumulusHostname(Hostname):
platform = 'Linux'
distribution = 'Cumulus-linux'
strategy_class = FileStrategy
class KaliHostname(Hostname):
platform = 'Linux'
distribution = 'Kali'
strategy_class = FileStrategy
class ParrotHostname(Hostname):
platform = 'Linux'
distribution = 'Parrot'
strategy_class = FileStrategy
class UbuntuHostname(Hostname):
platform = 'Linux'
distribution = 'Ubuntu'
strategy_class = FileStrategy
class LinuxmintHostname(Hostname):
platform = 'Linux'
distribution = 'Linuxmint'
strategy_class = FileStrategy
class LinaroHostname(Hostname):
platform = 'Linux'
distribution = 'Linaro'
strategy_class = FileStrategy
class DevuanHostname(Hostname):
platform = 'Linux'
distribution = 'Devuan'
strategy_class = FileStrategy
class RaspbianHostname(Hostname):
platform = 'Linux'
distribution = 'Raspbian'
strategy_class = FileStrategy
class GentooHostname(Hostname):
platform = 'Linux'
distribution = 'Gentoo'
strategy_class = OpenRCStrategy
class ALTLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Altlinux'
strategy_class = RedHatStrategy
class AlpineLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Alpine'
strategy_class = AlpineStrategy
class OpenBSDHostname(Hostname):
platform = 'OpenBSD'
distribution = None
strategy_class = OpenBSDStrategy
class SolarisHostname(Hostname):
platform = 'SunOS'
distribution = None
strategy_class = SolarisStrategy
class FreeBSDHostname(Hostname):
platform = 'FreeBSD'
distribution = None
strategy_class = FreeBSDStrategy
class NetBSDHostname(Hostname):
platform = 'NetBSD'
distribution = None
strategy_class = FreeBSDStrategy
class NeonHostname(Hostname):
platform = 'Linux'
distribution = 'Neon'
strategy_class = FileStrategy
class DarwinHostname(Hostname):
platform = 'Darwin'
distribution = None
strategy_class = DarwinStrategy
class OsmcHostname(Hostname):
platform = 'Linux'
distribution = 'Osmc'
strategy_class = SystemdStrategy
class PardusHostname(Hostname):
platform = 'Linux'
distribution = 'Pardus'
strategy_class = SystemdStrategy
class VoidLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Void'
strategy_class = FileStrategy
class PopHostname(Hostname):
platform = 'Linux'
distribution = 'Pop'
strategy_class = FileStrategy
class RockyHostname(Hostname):
platform = 'Linux'
distribution = 'Rocky'
strategy_class = SystemdStrategy
class RedosHostname(Hostname):
platform = 'Linux'
distribution = 'Redos'
strategy_class = SystemdStrategy
class EurolinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Eurolinux'
strategy_class = RedHatStrategy
class OpenEulerHostname(Hostname):
platform = 'Linux'
distribution = 'Openeuler'
strategy_class = SystemdStrategy
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
use=dict(type='str', choices=STRATS.keys())
),
supports_check_mode=True,
)
hostname = Hostname(module)
name = module.params['name']
current_hostname = hostname.get_current_hostname()
permanent_hostname = hostname.get_permanent_hostname()
changed = hostname.update_current_and_permanent_hostname()
if name != current_hostname:
name_before = current_hostname
elif name != permanent_hostname:
name_before = permanent_hostname
else:
name_before = permanent_hostname
# NOTE: socket.getfqdn() calls gethostbyaddr(socket.gethostname()), which can be
# slow to return if the name does not resolve correctly.
kw = dict(changed=changed, name=name,
ansible_facts=dict(ansible_hostname=name.split('.')[0],
ansible_nodename=name,
ansible_fqdn=socket.getfqdn(),
ansible_domain='.'.join(socket.getfqdn().split('.')[1:])))
if changed:
kw['diff'] = {'after': 'hostname = ' + name + '\n',
'before': 'hostname = ' + name_before + '\n'}
module.exit_json(**kw)
if __name__ == '__main__':
main()
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,320 |
Module Logging does not naturally work with WSL2
|
### Summary
When developing a custom module on WSL2 and calling `AnsibleModule.log(msg)`, the underlying code in `ansible/module_utils/basic.py` incorrectly assumes systemd-journal is set up correctly as long as the corresponding Python modules import without errors. WSL2 does not natively support systemd, so the log messages do not fall back to syslog, which WSL2 supports much better.
Note that I do realize there is a workaround for systemd on WSL2 with `systemd-genie`, but that shouldn't be required just to get log messages out of Ansible.
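A runnable sketch of one way a fix could check whether journald is actually usable before preferring it over syslog (the helper name is hypothetical; the path is the standard journald listening socket):
```python
import os

def journald_usable():
    # systemd-journald listens on this socket while it is running; on stock
    # WSL2, which has no systemd, the socket does not exist.
    return os.path.exists('/run/systemd/journal/socket')
```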
### Issue Type
Bug Report
### Component Name
AnsibleModule.log()
### Ansible Version
```console
2.10.9
```
### Configuration
```console
$ ansible-config dump --only-changed
DEFAULT_HOST_LIST(/etc/ansible/ansible.cfg) = ['/etc/ansible/inventory_localhost.yml']
DEFAULT_VERBOSITY(/etc/ansible/ansible.cfg) = 0
DISPLAY_SKIPPED_HOSTS(/etc/ansible/ansible.cfg) = False
HOST_KEY_CHECKING(/etc/ansible/ansible.cfg) = False
INTERPRETER_PYTHON(/etc/ansible/ansible.cfg) = auto_silent
```
### OS / Environment
WSL2
### Steps to Reproduce
Just call `AnsibleModule.log()` on WSL2.
### Expected Results
Log messages should go to syslog on WSL2.
### Actual Results
```console
Log messages cannot be found.
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76320
|
https://github.com/ansible/ansible/pull/76375
|
8febd37f325b049afe448af689064ee019d1099c
|
8c06aada1063b81409a5d8e95eaf7309fa72b9bd
| 2021-11-18T13:55:21Z |
python
| 2022-01-21T16:27:36Z |
changelogs/fragments/76375-fix-module-logging-WSL2.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,320 |
Module Logging does not naturally work with WSL2
|
### Summary
When developing a custom module on WSL2 and calling `AnsibleModule.log(msg)`, the underlying code in `ansible/module_utils/basic.py` incorrectly assumes systemd-journal is set up correctly as long as the corresponding Python modules import without errors. WSL2 does not natively support systemd, so the log messages do not fall back to syslog, which WSL2 supports much better.
Note that I do realize there is a workaround for systemd on WSL2 with `systemd-genie`, but that shouldn't be required just to get log messages out of Ansible.
### Issue Type
Bug Report
### Component Name
AnsibleModule.log()
### Ansible Version
```console
2.10.9
```
### Configuration
```console
$ ansible-config dump --only-changed
DEFAULT_HOST_LIST(/etc/ansible/ansible.cfg) = ['/etc/ansible/inventory_localhost.yml']
DEFAULT_VERBOSITY(/etc/ansible/ansible.cfg) = 0
DISPLAY_SKIPPED_HOSTS(/etc/ansible/ansible.cfg) = False
HOST_KEY_CHECKING(/etc/ansible/ansible.cfg) = False
INTERPRETER_PYTHON(/etc/ansible/ansible.cfg) = auto_silent
```
### OS / Environment
WSL2
### Steps to Reproduce
Just call `AnsibleModule.log()` on WSL2.
### Expected Results
Log messages should go to syslog on WSL2.
### Actual Results
```console
Log messages cannot be found.
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76320
|
https://github.com/ansible/ansible/pull/76375
|
8febd37f325b049afe448af689064ee019d1099c
|
8c06aada1063b81409a5d8e95eaf7309fa72b9bd
| 2021-11-18T13:55:21Z |
python
| 2022-01-21T16:27:36Z |
lib/ansible/module_utils/basic.py
|
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# Copyright (c), Toshio Kuratomi <[email protected]> 2016
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
FILE_ATTRIBUTES = {
'A': 'noatime',
'a': 'append',
'c': 'compressed',
'C': 'nocow',
'd': 'nodump',
'D': 'dirsync',
'e': 'extents',
'E': 'encrypted',
'h': 'blocksize',
'i': 'immutable',
'I': 'indexed',
'j': 'journalled',
'N': 'inline',
's': 'zero',
'S': 'synchronous',
't': 'notail',
'T': 'blockroot',
'u': 'undelete',
'X': 'compressedraw',
'Z': 'compresseddirty',
}
# Ansible modules can be written in any language.
# The functions available here can be used to do many common tasks,
# to simplify development of Python modules.
import __main__
import atexit
import errno
import datetime
import grp
import fcntl
import locale
import os
import pwd
import platform
import re
import select
import shlex
import shutil
import signal
import stat
import subprocess
import sys
import tempfile
import time
import traceback
import types
from itertools import chain, repeat
try:
import syslog
HAS_SYSLOG = True
except ImportError:
HAS_SYSLOG = False
try:
from systemd import journal
    # Make sure that systemd.journal has method sendv() (some packages don't provide it)
has_journal = hasattr(journal, 'sendv')
except ImportError:
has_journal = False
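# NOTE: a successful import above only proves the Python bindings are
# installed; it does not guarantee journald is actually running (stock WSL2,
# for example, has no systemd), so logging code should still be prepared to
# fall back to syslog.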
HAVE_SELINUX = False
try:
from ansible.module_utils.compat import selinux
HAVE_SELINUX = True
except ImportError:
pass
# Python2 & 3 way to get NoneType
NoneType = type(None)
from ansible.module_utils.compat import selectors
from ._text import to_native, to_bytes, to_text
from ansible.module_utils.common.text.converters import (
jsonify,
container_to_bytes as json_dict_unicode_to_bytes,
container_to_text as json_dict_bytes_to_unicode,
)
from ansible.module_utils.common.arg_spec import ModuleArgumentSpecValidator
from ansible.module_utils.common.text.formatters import (
lenient_lowercase,
bytes_to_human,
human_to_bytes,
SIZE_RANGES,
)
try:
from ansible.module_utils.common._json_compat import json
except ImportError as e:
print('\n{{"msg": "Error: ansible requires the stdlib json: {0}", "failed": true}}'.format(to_native(e)))
sys.exit(1)
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
# we may have been able to import md5 but it could still not be available
try:
hashlib.md5()
except ValueError:
AVAILABLE_HASH_ALGORITHMS.pop('md5', None)
except Exception:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except Exception:
pass
from ansible.module_utils.common._collections_compat import (
KeysView,
Mapping, MutableMapping,
Sequence, MutableSequence,
Set, MutableSet,
)
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.file import (
_PERM_BITS as PERM_BITS,
_EXEC_PERM_BITS as EXEC_PERM_BITS,
_DEFAULT_PERM as DEFAULT_PERM,
is_executable,
format_attributes,
get_flags_from_attributes,
)
from ansible.module_utils.common.sys_info import (
get_distribution,
get_distribution_version,
get_platform_subclass,
)
from ansible.module_utils.pycompat24 import get_exception, literal_eval
from ansible.module_utils.common.parameters import (
env_fallback,
remove_values,
sanitize_keys,
DEFAULT_TYPE_VALIDATORS,
PASS_VARS,
PASS_BOOLS,
)
from ansible.module_utils.errors import AnsibleFallbackNotFound, AnsibleValidationErrorMultiple, UnsupportedError
from ansible.module_utils.six import (
PY2,
PY3,
b,
binary_type,
integer_types,
iteritems,
string_types,
text_type,
)
from ansible.module_utils.six.moves import map, reduce, shlex_quote
from ansible.module_utils.common.validation import (
check_missing_parameters,
safe_eval,
)
from ansible.module_utils.common._utils import get_all_subclasses as _get_all_subclasses
from ansible.module_utils.parsing.convert_bool import BOOLEANS, BOOLEANS_FALSE, BOOLEANS_TRUE, boolean
from ansible.module_utils.common.warnings import (
deprecate,
get_deprecation_messages,
get_warning_messages,
warn,
)
# Note: When getting Sequence from collections, it matches with strings. If
# this matters, make sure to check for strings before checking for sequencetype
SEQUENCETYPE = frozenset, KeysView, Sequence
PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)
imap = map
try:
# Python 2
unicode
except NameError:
# Python 3
unicode = text_type
try:
# Python 2
basestring
except NameError:
# Python 3
basestring = string_types
_literal_eval = literal_eval
# End of deprecated names
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
FILE_COMMON_ARGUMENTS = dict(
# These are things we want. About setting metadata (mode, ownership, permissions in general) on
# created files (these are used by set_fs_attributes_if_different and included in
# load_file_common_arguments)
mode=dict(type='raw'),
owner=dict(type='str'),
group=dict(type='str'),
seuser=dict(type='str'),
serole=dict(type='str'),
selevel=dict(type='str'),
setype=dict(type='str'),
attributes=dict(type='str', aliases=['attr']),
unsafe_writes=dict(type='bool', default=False, fallback=(env_fallback, ['ANSIBLE_UNSAFE_WRITES'])), # should be available to any module using atomic_move
)
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
# Used for parsing symbolic file perms
MODE_OPERATOR_RE = re.compile(r'[+=-]')
USERS_RE = re.compile(r'[^ugo]')
PERMS_RE = re.compile(r'[^rwxXstugo]')
# Used for determining if the system is running a new enough python version
# and should only restrict on our documented minimum versions
_PY3_MIN = sys.version_info >= (3, 5)
_PY2_MIN = (2, 7) <= sys.version_info < (3,)
_PY_MIN = _PY3_MIN or _PY2_MIN
if not _PY_MIN:
print(
'\n{"failed": true, '
'"msg": "ansible-core requires a minimum of Python2 version 2.7 or Python3 version 3.5. Current version: %s"}' % ''.join(sys.version.splitlines())
)
sys.exit(1)
#
# Deprecated functions
#
def get_platform():
'''
**Deprecated** Use :py:func:`platform.system` directly.
:returns: Name of the platform the module is running on in a native string
Returns a native string that labels the platform ("Linux", "Solaris", etc). Currently, this is
the result of calling :py:func:`platform.system`.
'''
return platform.system()
# End deprecated functions
#
# Compat shims
#
def load_platform_subclass(cls, *args, **kwargs):
"""**Deprecated**: Use ansible.module_utils.common.sys_info.get_platform_subclass instead"""
platform_cls = get_platform_subclass(cls)
return super(cls, platform_cls).__new__(platform_cls)
def get_all_subclasses(cls):
"""**Deprecated**: Use ansible.module_utils.common._utils.get_all_subclasses instead"""
return list(_get_all_subclasses(cls))
# End compat shims
def heuristic_log_sanitize(data, no_log_values=None):
''' Remove strings that look like passwords from log messages '''
# Currently filters:
# user:pass@foo/whatever and http://username:pass@wherever/foo
# This code has false positives and consumes parts of logs that are
# not passwds
# begin: start of a passwd containing string
# end: end of a passwd containing string
# sep: char between user and passwd
# prev_begin: where in the overall string to start a search for
# a passwd
# sep_search_end: where in the string to end a search for the sep
data = to_native(data)
output = []
begin = len(data)
prev_begin = begin
sep = 1
while sep:
# Find the potential end of a passwd
try:
end = data.rindex('@', 0, begin)
except ValueError:
# No passwd in the rest of the data
output.insert(0, data[0:begin])
break
# Search for the beginning of a passwd
sep = None
sep_search_end = end
while not sep:
# URL-style username+password
try:
begin = data.rindex('://', 0, sep_search_end)
except ValueError:
# No url style in the data, check for ssh style in the
# rest of the string
begin = 0
# Search for separator
try:
sep = data.index(':', begin + 3, end)
except ValueError:
# No separator; choices:
if begin == 0:
# Searched the whole string so there's no password
# here. Return the remaining data
output.insert(0, data[0:prev_begin])
break
# Search for a different beginning of the password field.
sep_search_end = begin
continue
if sep:
# Password was found; remove it.
output.insert(0, data[end:prev_begin])
output.insert(0, '********')
output.insert(0, data[begin:sep + 1])
prev_begin = begin
output = ''.join(output)
if no_log_values:
output = remove_values(output, no_log_values)
return output
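# Illustrative result (added example):
#   heuristic_log_sanitize('http://user:secret@host/path')
#     -> 'http://user:********@host/path'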
def _load_params():
''' read the modules parameters and store them globally.
This function may be needed for certain very dynamic custom modules which
    want to process the parameters that are being handed to the module. Since
this is so closely tied to the implementation of modules we cannot
guarantee API stability for it (it may change between versions) however we
will try not to break it gratuitously. It is certainly more future-proof
to call this function and consume its outputs than to implement the logic
inside it as a copy in your own code.
'''
global _ANSIBLE_ARGS
if _ANSIBLE_ARGS is not None:
buffer = _ANSIBLE_ARGS
else:
# debug overrides to read args from file or cmdline
# Avoid tracebacks when locale is non-utf8
# We control the args and we pass them as utf8
if len(sys.argv) > 1:
if os.path.isfile(sys.argv[1]):
fd = open(sys.argv[1], 'rb')
buffer = fd.read()
fd.close()
else:
buffer = sys.argv[1]
if PY3:
buffer = buffer.encode('utf-8', errors='surrogateescape')
# default case, read from stdin
else:
if PY2:
buffer = sys.stdin.read()
else:
buffer = sys.stdin.buffer.read()
_ANSIBLE_ARGS = buffer
try:
params = json.loads(buffer.decode('utf-8'))
except ValueError:
        # This helper is used too early for fail_json to work.
print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
if PY2:
params = json_dict_unicode_to_bytes(params)
try:
return params['ANSIBLE_MODULE_ARGS']
except KeyError:
# This helper does not have access to fail_json so we have to print
# json output on our own.
print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", '
'"failed": true}')
sys.exit(1)
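# Illustrative sketch of the JSON document _load_params() consumes on stdin
# (or via the file/argv debug overrides); the option names are hypothetical:
#
#   {"ANSIBLE_MODULE_ARGS": {"name": "httpd", "state": "started",
#                            "_ansible_check_mode": false}}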
def missing_required_lib(library, reason=None, url=None):
hostname = platform.node()
msg = "Failed to import the required Python library (%s) on %s's Python %s." % (library, hostname, sys.executable)
if reason:
msg += " This is required %s." % reason
if url:
msg += " See %s for more info." % url
msg += (" Please read the module documentation and install it in the appropriate location."
" If the required library is installed, but Ansible is using the wrong Python interpreter,"
" please consult the documentation on ansible_python_interpreter")
return msg
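# Illustrative usage sketch for missing_required_lib(); 'pexpect' and the
# surrounding module-author code are example assumptions, not part of this file:
#
#   try:
#       import pexpect
#   except ImportError:
#       module.fail_json(msg=missing_required_lib('pexpect'),
#                        exception=traceback.format_exc())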
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
mutually_exclusive=None, required_together=None,
required_one_of=None, add_file_common_args=False,
supports_check_mode=False, required_if=None, required_by=None):
'''
Common code for quickly building an ansible module in Python
(although you can write modules with anything that can return JSON).
See :ref:`developing_modules_general` for a general introduction
and :ref:`developing_program_flow_modules` for more detailed explanation.
'''
self._name = os.path.basename(__file__) # initialize name until we can parse from options
self.argument_spec = argument_spec
self.supports_check_mode = supports_check_mode
self.check_mode = False
self.bypass_checks = bypass_checks
self.no_log = no_log
self.mutually_exclusive = mutually_exclusive
self.required_together = required_together
self.required_one_of = required_one_of
self.required_if = required_if
self.required_by = required_by
self.cleanup_files = []
self._debug = False
self._diff = False
self._socket_path = None
self._shell = None
self._syslog_facility = 'LOG_USER'
self._verbosity = 0
# May be used to set modifications to the environment for any
# run_command invocation
self.run_command_environ_update = {}
self._clean = {}
self._string_conversion_action = ''
self.aliases = {}
self._legal_inputs = []
self._options_context = list()
self._tmpdir = None
if add_file_common_args:
for k, v in FILE_COMMON_ARGUMENTS.items():
if k not in self.argument_spec:
self.argument_spec[k] = v
# Save parameter values that should never be logged
self.no_log_values = set()
# check the locale as set by the current environment, and reset to
# a known valid (LANG=C) if it's an invalid/unavailable locale
self._check_locale()
self._load_params()
self._set_internal_properties()
self.validator = ModuleArgumentSpecValidator(self.argument_spec,
self.mutually_exclusive,
self.required_together,
self.required_one_of,
self.required_if,
self.required_by,
)
self.validation_result = self.validator.validate(self.params)
self.params.update(self.validation_result.validated_parameters)
self.no_log_values.update(self.validation_result._no_log_values)
try:
error = self.validation_result.errors[0]
except IndexError:
error = None
# Fail for validation errors, even in check mode
if error:
msg = self.validation_result.errors.msg
if isinstance(error, UnsupportedError):
msg = "Unsupported parameters for ({name}) {kind}: {msg}".format(name=self._name, kind='module', msg=msg)
self.fail_json(msg=msg)
if self.check_mode and not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
# This is for backwards compatibility only.
self._CHECK_ARGUMENT_TYPES_DISPATCHER = DEFAULT_TYPE_VALIDATORS
if not self.no_log:
self._log_invocation()
# selinux state caching
self._selinux_enabled = None
self._selinux_mls_enabled = None
self._selinux_initial_context = None
# finally, make sure we're in a sane working dir
self._set_cwd()
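    # Illustrative sketch of how module authors typically construct this class;
    # the 'name'/'state' options below are hypothetical examples:
    #
    #   module = AnsibleModule(
    #       argument_spec=dict(
    #           name=dict(type='str', required=True),
    #           state=dict(type='str', choices=['present', 'absent'], default='present'),
    #       ),
    #       supports_check_mode=True,
    #   )
    #   module.exit_json(changed=False, name=module.params['name'])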
@property
def tmpdir(self):
# if _ansible_tmpdir was not set and we have a remote_tmp,
# the module needs to create it and clean it up once finished.
# otherwise we create our own module tmp dir from the system defaults
if self._tmpdir is None:
basedir = None
if self._remote_tmp is not None:
basedir = os.path.expanduser(os.path.expandvars(self._remote_tmp))
if basedir is not None and not os.path.exists(basedir):
try:
os.makedirs(basedir, mode=0o700)
except (OSError, IOError) as e:
self.warn("Unable to use %s as temporary directory, "
"failing back to system: %s" % (basedir, to_native(e)))
basedir = None
else:
self.warn("Module remote_tmp %s did not exist and was "
"created with a mode of 0700, this may cause"
" issues when running as another user. To "
"avoid this, create the remote_tmp dir with "
"the correct permissions manually" % basedir)
basefile = "ansible-moduletmp-%s-" % time.time()
try:
tmpdir = tempfile.mkdtemp(prefix=basefile, dir=basedir)
except (OSError, IOError) as e:
self.fail_json(
msg="Failed to create remote module tmp path at dir %s "
"with prefix %s: %s" % (basedir, basefile, to_native(e))
)
if not self._keep_remote_files:
atexit.register(shutil.rmtree, tmpdir)
self._tmpdir = tmpdir
return self._tmpdir
def warn(self, warning):
warn(warning)
self.log('[WARNING] %s' % warning)
def deprecate(self, msg, version=None, date=None, collection_name=None):
if version is not None and date is not None:
raise AssertionError("implementation error -- version and date must not both be set")
deprecate(msg, version=version, date=date, collection_name=collection_name)
# For compatibility, we accept that neither version nor date is set,
        # and treat that the same as if version had been set
if date is not None:
self.log('[DEPRECATION WARNING] %s %s' % (msg, date))
else:
self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
def load_file_common_arguments(self, params, path=None):
'''
many modules deal with files, this encapsulates common
options that the file module accepts such that it is directly
available to all modules and they can share code.
        Allows overwriting the path/dest module argument by providing path.
'''
if path is None:
path = params.get('path', params.get('dest', None))
if path is None:
return {}
else:
path = os.path.expanduser(os.path.expandvars(path))
b_path = to_bytes(path, errors='surrogate_or_strict')
# if the path is a symlink, and we're following links, get
# the target of the link instead for testing
if params.get('follow', False) and os.path.islink(b_path):
b_path = os.path.realpath(b_path)
path = to_native(b_path)
mode = params.get('mode', None)
owner = params.get('owner', None)
group = params.get('group', None)
# selinux related options
seuser = params.get('seuser', None)
serole = params.get('serole', None)
setype = params.get('setype', None)
selevel = params.get('selevel', None)
secontext = [seuser, serole, setype]
if self.selinux_mls_enabled():
secontext.append(selevel)
default_secontext = self.selinux_default_context(path)
for i in range(len(default_secontext)):
if i is not None and secontext[i] == '_default':
secontext[i] = default_secontext[i]
attributes = params.get('attributes', None)
return dict(
path=path, mode=mode, owner=owner, group=group,
seuser=seuser, serole=serole, setype=setype,
selevel=selevel, secontext=secontext, attributes=attributes,
)
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if self._selinux_mls_enabled is None:
self._selinux_mls_enabled = HAVE_SELINUX and selinux.is_selinux_mls_enabled() == 1
return self._selinux_mls_enabled
def selinux_enabled(self):
if self._selinux_enabled is None:
self._selinux_enabled = HAVE_SELINUX and selinux.is_selinux_enabled() == 1
return self._selinux_enabled
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
if self._selinux_initial_context is None:
self._selinux_initial_context = [None, None, None]
if self.selinux_mls_enabled():
self._selinux_initial_context.append(None)
return self._selinux_initial_context
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
if not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
except OSError:
return context
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def selinux_context(self, path):
context = self.selinux_initial_context()
if not self.selinux_enabled():
return context
try:
ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
except OSError as e:
if e.errno == errno.ENOENT:
self.fail_json(path=path, msg='path %s does not exist' % path)
else:
self.fail_json(path=path, msg='failed to retrieve selinux context')
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def user_and_group(self, path, expand=True):
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
st = os.lstat(b_path)
uid = st.st_uid
gid = st.st_gid
return (uid, gid)
def find_mount_point(self, path):
'''
        Takes a path and returns its mount point
:param path: a string type with a filesystem path
:returns: the path to the mount point as a text type
'''
b_path = os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict'))
while not os.path.ismount(b_path):
b_path = os.path.dirname(b_path)
return to_text(b_path, errors='surrogate_or_strict')
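    # Illustrative sketch (hypothetical layout): for '/home/user/file' this
    # walks up via os.path.dirname() until os.path.ismount() is true, so it
    # would return '/home' where /home is a separate mount, else '/'.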
def is_special_selinux_path(self, path):
"""
Returns a tuple containing (True, selinux_context) if the given path is on a
NFS or other 'special' fs mount point, otherwise the return will be (False, None).
"""
try:
f = open('/proc/mounts', 'r')
mount_data = f.readlines()
f.close()
except Exception:
return (False, None)
path_mount_point = self.find_mount_point(path)
for line in mount_data:
(device, mount_point, fstype, options, rest) = line.split(' ', 4)
if to_bytes(path_mount_point) == to_bytes(mount_point):
for fs in self._selinux_special_fs:
if fs in fstype:
special_context = self.selinux_context(path_mount_point)
return (True, special_context)
return (False, None)
def set_default_selinux_context(self, path, changed):
if not self.selinux_enabled():
return changed
context = self.selinux_default_context(path)
return self.set_context_if_different(path, context, False)
def set_context_if_different(self, path, context, changed, diff=None):
if not self.selinux_enabled():
return changed
if self.check_file_absent_if_check_mode(path):
return True
cur_context = self.selinux_context(path)
new_context = list(cur_context)
# Iterate over the current context instead of the
# argument context, which may have selevel.
(is_special_se, sp_context) = self.is_special_selinux_path(path)
if is_special_se:
new_context = sp_context
else:
for i in range(len(cur_context)):
if len(context) > i:
if context[i] is not None and context[i] != cur_context[i]:
new_context[i] = context[i]
elif context[i] is None:
new_context[i] = cur_context[i]
if cur_context != new_context:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['secontext'] = cur_context
if 'after' not in diff:
diff['after'] = {}
diff['after']['secontext'] = new_context
try:
if self.check_mode:
return True
rc = selinux.lsetfilecon(to_native(path), ':'.join(new_context))
except OSError as e:
self.fail_json(path=path, msg='invalid selinux context: %s' % to_native(e),
new_context=new_context, cur_context=cur_context, input_was=context)
if rc != 0:
self.fail_json(path=path, msg='set selinux context failed')
changed = True
return changed
def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
if owner is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
uid = int(owner)
except ValueError:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['owner'] = orig_uid
if 'after' not in diff:
diff['after'] = {}
diff['after']['owner'] = uid
if self.check_mode:
return True
try:
os.lchown(b_path, uid, -1)
except (IOError, OSError) as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: %s' % (to_text(e)))
changed = True
return changed
def set_group_if_different(self, path, group, changed, diff=None, expand=True):
if group is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['group'] = orig_gid
if 'after' not in diff:
diff['after'] = {}
diff['after']['group'] = gid
if self.check_mode:
return True
try:
os.lchown(b_path, -1, gid)
except OSError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed')
changed = True
return changed
def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
if mode is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
path_stat = os.lstat(b_path)
if not isinstance(mode, int):
try:
mode = int(mode, 8)
except Exception:
try:
mode = self._symbolic_mode_to_octal(path_stat, mode)
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path,
msg="mode must be in octal or symbolic form",
details=to_native(e))
if mode != stat.S_IMODE(mode):
            # prevent mode from having extra info or being an invalid long number
path = to_text(b_path)
self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
prev_mode = stat.S_IMODE(path_stat.st_mode)
if prev_mode != mode:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['mode'] = '0%03o' % prev_mode
if 'after' not in diff:
diff['after'] = {}
diff['after']['mode'] = '0%03o' % mode
if self.check_mode:
return True
# FIXME: comparison against string above will cause this to be executed
# every time
try:
if hasattr(os, 'lchmod'):
os.lchmod(b_path, mode)
else:
if not os.path.islink(b_path):
os.chmod(b_path, mode)
else:
# Attempt to set the perms of the symlink but be
# careful not to change the perms of the underlying
# file while trying
underlying_stat = os.stat(b_path)
os.chmod(b_path, mode)
new_underlying_stat = os.stat(b_path)
if underlying_stat.st_mode != new_underlying_stat.st_mode:
os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
except OSError as e:
if os.path.islink(b_path) and e.errno in (
errno.EACCES, # can't access symlink in sticky directory (stat)
errno.EPERM, # can't set mode on symbolic links (chmod)
errno.EROFS, # can't set mode on read-only filesystem
):
pass
elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
pass
else:
raise
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chmod failed', details=to_native(e),
exception=traceback.format_exc())
path_stat = os.lstat(b_path)
new_mode = stat.S_IMODE(path_stat.st_mode)
if new_mode != prev_mode:
changed = True
return changed
def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
if attributes is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if self.check_file_absent_if_check_mode(b_path):
return True
existing = self.get_file_attributes(b_path, include_version=False)
attr_mod = '='
if attributes.startswith(('-', '+')):
attr_mod = attributes[0]
attributes = attributes[1:]
if existing.get('attr_flags', '') != attributes or attr_mod == '-':
attrcmd = self.get_bin_path('chattr')
if attrcmd:
attrcmd = [attrcmd, '%s%s' % (attr_mod, attributes), b_path]
changed = True
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['attributes'] = existing.get('attr_flags')
if 'after' not in diff:
diff['after'] = {}
diff['after']['attributes'] = '%s%s' % (attr_mod, attributes)
if not self.check_mode:
try:
rc, out, err = self.run_command(attrcmd)
if rc != 0 or err:
raise Exception("Error while setting attributes: %s" % (out + err))
except Exception as e:
self.fail_json(path=to_text(b_path), msg='chattr failed',
details=to_native(e), exception=traceback.format_exc())
return changed
def get_file_attributes(self, path, include_version=True):
output = {}
attrcmd = self.get_bin_path('lsattr', False)
if attrcmd:
flags = '-vd' if include_version else '-d'
attrcmd = [attrcmd, flags, path]
try:
rc, out, err = self.run_command(attrcmd)
if rc == 0:
res = out.split()
attr_flags_idx = 0
if include_version:
attr_flags_idx = 1
output['version'] = res[0].strip()
output['attr_flags'] = res[attr_flags_idx].replace('-', '').strip()
output['attributes'] = format_attributes(output['attr_flags'])
except Exception:
pass
return output
@classmethod
def _symbolic_mode_to_octal(cls, path_stat, symbolic_mode):
"""
This enables symbolic chmod string parsing as stated in the chmod man-page
This includes things like: "u=rw-x+X,g=r-x+X,o=r-x+X"
"""
new_mode = stat.S_IMODE(path_stat.st_mode)
# Now parse all symbolic modes
for mode in symbolic_mode.split(','):
# Per single mode. This always contains a '+', '-' or '='
# Split it on that
permlist = MODE_OPERATOR_RE.split(mode)
# And find all the operators
opers = MODE_OPERATOR_RE.findall(mode)
            # The user(s) the mode applies to is the first element in the
            # 'permlist' list. Take that and remove it from the list.
# An empty user or 'a' means 'all'.
users = permlist.pop(0)
use_umask = (users == '')
if users == 'a' or users == '':
users = 'ugo'
# Check if there are illegal characters in the user list
# They can end up in 'users' because they are not split
if USERS_RE.match(users):
raise ValueError("bad symbolic permission for mode: %s" % mode)
            # Now we have two lists of equal length: one contains the requested
            # permissions and the other the corresponding operators.
for idx, perms in enumerate(permlist):
# Check if there are illegal characters in the permissions
if PERMS_RE.match(perms):
raise ValueError("bad symbolic permission for mode: %s" % mode)
for user in users:
mode_to_apply = cls._get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask)
new_mode = cls._apply_operation_to_mode(user, opers[idx], mode_to_apply, new_mode)
return new_mode
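    # Worked example for the parser above: starting from a file whose current
    # mode is 0o644, the symbolic mode 'u+x,g-r' first ORs in S_IXUSR (0o100)
    # giving 0o744, then clears S_IRGRP (0o040), yielding 0o704.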
@staticmethod
def _apply_operation_to_mode(user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u':
mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g':
mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o':
mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
@staticmethod
def _get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask):
prev_mode = stat.S_IMODE(path_stat.st_mode)
is_directory = stat.S_ISDIR(path_stat.st_mode)
has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
apply_X_permission = is_directory or has_x_permissions
# Get the umask, if the 'user' part is empty, the effect is as if (a) were
# given, but bits that are set in the umask are not affected.
# We also need the "reversed umask" for masking
umask = os.umask(0)
os.umask(umask)
rev_umask = umask ^ PERM_BITS
# Permission bits constants documented at:
# https://docs.python.org/3/library/stat.html#stat.S_ISUID
if apply_X_permission:
X_perms = {
'u': {'X': stat.S_IXUSR},
'g': {'X': stat.S_IXGRP},
'o': {'X': stat.S_IXOTH},
}
else:
X_perms = {
'u': {'X': 0},
'g': {'X': 0},
'o': {'X': 0},
}
user_perms_to_modes = {
'u': {
'r': rev_umask & stat.S_IRUSR if use_umask else stat.S_IRUSR,
'w': rev_umask & stat.S_IWUSR if use_umask else stat.S_IWUSR,
'x': rev_umask & stat.S_IXUSR if use_umask else stat.S_IXUSR,
's': stat.S_ISUID,
't': 0,
'u': prev_mode & stat.S_IRWXU,
'g': (prev_mode & stat.S_IRWXG) << 3,
'o': (prev_mode & stat.S_IRWXO) << 6},
'g': {
'r': rev_umask & stat.S_IRGRP if use_umask else stat.S_IRGRP,
'w': rev_umask & stat.S_IWGRP if use_umask else stat.S_IWGRP,
'x': rev_umask & stat.S_IXGRP if use_umask else stat.S_IXGRP,
's': stat.S_ISGID,
't': 0,
'u': (prev_mode & stat.S_IRWXU) >> 3,
'g': prev_mode & stat.S_IRWXG,
'o': (prev_mode & stat.S_IRWXO) << 3},
'o': {
'r': rev_umask & stat.S_IROTH if use_umask else stat.S_IROTH,
'w': rev_umask & stat.S_IWOTH if use_umask else stat.S_IWOTH,
'x': rev_umask & stat.S_IXOTH if use_umask else stat.S_IXOTH,
's': 0,
't': stat.S_ISVTX,
'u': (prev_mode & stat.S_IRWXU) >> 6,
'g': (prev_mode & stat.S_IRWXG) >> 3,
'o': prev_mode & stat.S_IRWXO},
}
# Insert X_perms into user_perms_to_modes
for key, value in X_perms.items():
user_perms_to_modes[key].update(value)
def or_reduce(mode, perm):
return mode | user_perms_to_modes[user][perm]
return reduce(or_reduce, perms, 0)
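    # Sketch of the 'X' semantics handled above: for a regular file with no
    # execute bits set (e.g. 0o644), '+X' adds nothing; for a directory or an
    # already-executable file it behaves like '+x'.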
def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True):
# set modes owners and context as needed
changed = self.set_context_if_different(
file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
file_args['path'], file_args['owner'], changed, diff, expand
)
changed = self.set_group_if_different(
file_args['path'], file_args['group'], changed, diff, expand
)
changed = self.set_mode_if_different(
file_args['path'], file_args['mode'], changed, diff, expand
)
changed = self.set_attributes_if_different(
file_args['path'], file_args['attributes'], changed, diff, expand
)
return changed
def check_file_absent_if_check_mode(self, file_path):
return self.check_mode and not os.path.exists(file_path)
def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def add_path_info(self, kwargs):
'''
for results that are files, supplement the info about the file
in the return path with stats about the file path.
'''
path = kwargs.get('path', kwargs.get('dest', None))
if path is None:
return kwargs
b_path = to_bytes(path, errors='surrogate_or_strict')
if os.path.exists(b_path):
(uid, gid) = self.user_and_group(path)
kwargs['uid'] = uid
kwargs['gid'] = gid
try:
user = pwd.getpwuid(uid)[0]
except KeyError:
user = str(uid)
try:
group = grp.getgrgid(gid)[0]
except KeyError:
group = str(gid)
kwargs['owner'] = user
kwargs['group'] = group
st = os.lstat(b_path)
kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
# secontext not yet supported
if os.path.islink(b_path):
kwargs['state'] = 'link'
elif os.path.isdir(b_path):
kwargs['state'] = 'directory'
elif os.stat(b_path).st_nlink > 1:
kwargs['state'] = 'hard'
else:
kwargs['state'] = 'file'
if self.selinux_enabled():
kwargs['secontext'] = ':'.join(self.selinux_context(path))
kwargs['size'] = st[stat.ST_SIZE]
return kwargs
def _check_locale(self):
'''
Uses the locale module to test the currently set locale
(per the LANG and LC_CTYPE environment settings)
'''
try:
# setting the locale to '' uses the default locale
# as it would be returned by locale.getdefaultlocale()
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
            # fall back to the 'best' locale, per get_best_parsable_locale();
            # the final fallback is 'C', which may cause unicode issues
            # but is preferable to simply failing on an unknown locale
best_locale = get_best_parsable_locale(self)
# need to set several since many tools choose to ignore documented precedence and scope
locale.setlocale(locale.LC_ALL, best_locale)
os.environ['LANG'] = best_locale
os.environ['LC_ALL'] = best_locale
os.environ['LC_MESSAGES'] = best_locale
except Exception as e:
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" %
to_native(e), exception=traceback.format_exc())
def _set_internal_properties(self, argument_spec=None, module_parameters=None):
if argument_spec is None:
argument_spec = self.argument_spec
if module_parameters is None:
module_parameters = self.params
for k in PASS_VARS:
# handle setting internal properties from internal ansible vars
param_key = '_ansible_%s' % k
if param_key in module_parameters:
if k in PASS_BOOLS:
setattr(self, PASS_VARS[k][0], self.boolean(module_parameters[param_key]))
else:
setattr(self, PASS_VARS[k][0], module_parameters[param_key])
# clean up internal top level params:
if param_key in self.params:
del self.params[param_key]
else:
# use defaults if not already set
if not hasattr(self, PASS_VARS[k][0]):
setattr(self, PASS_VARS[k][0], PASS_VARS[k][1])
def safe_eval(self, value, locals=None, include_exceptions=False):
return safe_eval(value, locals, include_exceptions)
def _load_params(self):
''' read the input and set the params attribute.
This method is for backwards compatibility. The guts of the function
were moved out in 2.1 so that custom modules could read the parameters.
'''
# debug overrides to read args from file or cmdline
self.params = _load_params()
def _log_to_syslog(self, msg):
if HAS_SYSLOG:
try:
module = 'ansible-%s' % self._name
facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
syslog.openlog(str(module), 0, facility)
syslog.syslog(syslog.LOG_INFO, msg)
except TypeError as e:
self.fail_json(
msg='Failed to log to syslog (%s). To proceed anyway, '
'disable syslog logging by setting no_target_syslog '
'to True in your Ansible config.' % to_native(e),
exception=traceback.format_exc(),
msg_to_log=msg,
)
def debug(self, msg):
if self._debug:
self.log('[debug] %s' % msg)
def log(self, msg, log_args=None):
if not self.no_log:
if log_args is None:
log_args = dict()
module = 'ansible-%s' % self._name
if isinstance(module, binary_type):
module = module.decode('utf-8', 'replace')
# 6655 - allow for accented characters
if not isinstance(msg, (binary_type, text_type)):
raise TypeError("msg should be a string (got %s)" % type(msg))
# We want journal to always take text type
# syslog takes bytes on py2, text type on py3
if isinstance(msg, binary_type):
journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
else:
# TODO: surrogateescape is a danger here on Py3
journal_msg = remove_values(msg, self.no_log_values)
if PY3:
syslog_msg = journal_msg
else:
syslog_msg = journal_msg.encode('utf-8', 'replace')
if has_journal:
journal_args = [("MODULE", os.path.basename(__file__))]
for arg in log_args:
name, value = (arg.upper(), str(log_args[arg]))
if name in (
'PRIORITY', 'MESSAGE', 'MESSAGE_ID',
'CODE_FILE', 'CODE_LINE', 'CODE_FUNC',
'SYSLOG_FACILITY', 'SYSLOG_IDENTIFIER',
'SYSLOG_PID',
):
name = "_%s" % name
journal_args.append((name, value))
try:
if HAS_SYSLOG:
# If syslog_facility specified, it needs to convert
# from the facility name to the facility code, and
# set it as SYSLOG_FACILITY argument of journal.send()
facility = getattr(syslog,
self._syslog_facility,
syslog.LOG_USER) >> 3
journal.send(MESSAGE=u"%s %s" % (module, journal_msg),
SYSLOG_FACILITY=facility,
**dict(journal_args))
else:
journal.send(MESSAGE=u"%s %s" % (module, journal_msg),
**dict(journal_args))
except IOError:
# fall back to syslog since logging to journal failed
self._log_to_syslog(syslog_msg)
else:
self._log_to_syslog(syslog_msg)
def _log_invocation(self):
''' log that ansible ran the module '''
# TODO: generalize a separate log function and make log_invocation use it
# Sanitize possible password argument when logging.
log_args = dict()
for param in self.params:
canon = self.aliases.get(param, param)
arg_opts = self.argument_spec.get(canon, {})
no_log = arg_opts.get('no_log', None)
# try to proactively capture password/passphrase fields
if no_log is None and PASSWORD_MATCH.search(param):
log_args[param] = 'NOT_LOGGING_PASSWORD'
self.warn('Module did not set no_log for %s' % param)
elif self.boolean(no_log):
log_args[param] = 'NOT_LOGGING_PARAMETER'
else:
param_val = self.params[param]
if not isinstance(param_val, (text_type, binary_type)):
param_val = str(param_val)
elif isinstance(param_val, text_type):
param_val = param_val.encode('utf-8')
log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()]
if msg:
msg = 'Invoked with %s' % ' '.join(msg)
else:
msg = 'Invoked'
self.log(msg, log_args=log_args)
def _set_cwd(self):
try:
cwd = os.getcwd()
if not os.access(cwd, os.F_OK | os.R_OK):
raise Exception()
return cwd
except Exception:
# we don't have access to the cwd, probably because of sudo.
# Try and move to a neutral location to prevent errors
for cwd in [self.tmpdir, os.path.expandvars('$HOME'), tempfile.gettempdir()]:
try:
if os.access(cwd, os.F_OK | os.R_OK):
os.chdir(cwd)
return cwd
except Exception:
pass
# we won't error here, as it may *not* be a problem,
# and we don't want to break modules unnecessarily
return None
def get_bin_path(self, arg, required=False, opt_dirs=None):
'''
Find system executable in PATH.
:param arg: The executable to find.
        :param required: if the executable is not found and required is ``True``, call fail_json
:param opt_dirs: optional list of directories to search in addition to ``PATH``
:returns: if found return full path; otherwise return None
'''
bin_path = None
try:
bin_path = get_bin_path(arg=arg, opt_dirs=opt_dirs)
except ValueError as e:
if required:
self.fail_json(msg=to_text(e))
else:
return bin_path
return bin_path
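    # Illustrative usage sketch ('git' and the extra directory are arbitrary
    # example values):
    #
    #   git_path = module.get_bin_path('git', required=True,
    #                                  opt_dirs=['/usr/local/bin'])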
def boolean(self, arg):
'''Convert the argument to a boolean'''
if arg is None:
return arg
try:
return boolean(arg)
except TypeError as e:
self.fail_json(msg=to_native(e))
def jsonify(self, data):
try:
return jsonify(data)
except UnicodeError as e:
self.fail_json(msg=to_text(e))
def from_json(self, data):
return json.loads(data)
def add_cleanup_file(self, path):
if path not in self.cleanup_files:
self.cleanup_files.append(path)
def do_cleanup_files(self):
for path in self.cleanup_files:
self.cleanup(path)
def _return_formatted(self, kwargs):
self.add_path_info(kwargs)
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
if 'warnings' in kwargs:
if isinstance(kwargs['warnings'], list):
for w in kwargs['warnings']:
self.warn(w)
else:
self.warn(kwargs['warnings'])
warnings = get_warning_messages()
if warnings:
kwargs['warnings'] = warnings
if 'deprecations' in kwargs:
if isinstance(kwargs['deprecations'], list):
for d in kwargs['deprecations']:
if isinstance(d, SEQUENCETYPE) and len(d) == 2:
self.deprecate(d[0], version=d[1])
elif isinstance(d, Mapping):
self.deprecate(d['msg'], version=d.get('version'), date=d.get('date'),
collection_name=d.get('collection_name'))
else:
self.deprecate(d) # pylint: disable=ansible-deprecated-no-version
else:
self.deprecate(kwargs['deprecations']) # pylint: disable=ansible-deprecated-no-version
deprecations = get_deprecation_messages()
if deprecations:
kwargs['deprecations'] = deprecations
kwargs = remove_values(kwargs, self.no_log_values)
print('\n%s' % self.jsonify(kwargs))
def exit_json(self, **kwargs):
''' return from the module, without error '''
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(0)
def fail_json(self, msg, **kwargs):
''' return from the module, with an error message '''
kwargs['failed'] = True
kwargs['msg'] = msg
# Add traceback if debug or high verbosity and it is missing
# NOTE: Badly named as exception, it really always has been a traceback
if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3):
if PY2:
# On Python 2 this is the last (stack frame) exception and as such may be unrelated to the failure
kwargs['exception'] = 'WARNING: The below traceback may *not* be related to the actual failure.\n' +\
''.join(traceback.format_tb(sys.exc_info()[2]))
else:
kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2]))
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(1)
def fail_on_missing_params(self, required_params=None):
if not required_params:
return
try:
check_missing_parameters(self.params, required_params)
except TypeError as e:
self.fail_json(msg=to_native(e))
def digest_from_file(self, filename, algorithm):
''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
b_filename = to_bytes(filename, errors='surrogate_or_strict')
if not os.path.exists(b_filename):
return None
if os.path.isdir(b_filename):
self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
# preserve old behaviour where the third parameter was a hash algorithm object
if hasattr(algorithm, 'hexdigest'):
digest_method = algorithm
else:
try:
digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
except KeyError:
self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
(filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
blocksize = 64 * 1024
infile = open(os.path.realpath(b_filename), 'rb')
block = infile.read(blocksize)
while block:
digest_method.update(block)
block = infile.read(blocksize)
infile.close()
return digest_method.hexdigest()
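    # Illustrative usage sketch (the path is a made-up example):
    #
    #   checksum = module.digest_from_file('/etc/example.conf', 'sha256')
    #   # -> hex digest string, or None if the file does not exist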
def md5(self, filename):
''' Return MD5 hex digest of local file using digest_from_file().
Do not use this function unless you have no other choice for:
1) Optional backwards compatibility
2) Compatibility with a third party protocol
This function will not work on systems complying with FIPS-140-2.
Most uses of this function can use the module.sha1 function instead.
'''
if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
''' Return SHA1 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
''' Return SHA-256 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha256')
def backup_local(self, fn):
        '''make a date-marked backup of the specified file; return the path of the backup, or an empty string if the file does not exist'''
backupdest = ''
if os.path.exists(fn):
# backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
try:
self.preserved_copy(fn, backupdest)
except (shutil.Error, IOError) as e:
self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, to_native(e)))
return backupdest
def cleanup(self, tmpfile):
if os.path.exists(tmpfile):
try:
os.unlink(tmpfile)
except OSError as e:
sys.stderr.write("could not cleanup %s: %s" % (tmpfile, to_native(e)))
def preserved_copy(self, src, dest):
"""Copy a file with preserved ownership, permissions and context"""
# shutil.copy2(src, dst)
# Similar to shutil.copy(), but metadata is copied as well - in fact,
# this is just shutil.copy() followed by copystat(). This is similar
# to the Unix command cp -p.
#
# shutil.copystat(src, dst)
# Copy the permission bits, last access time, last modification time,
# and flags from src to dst. The file contents, owner, and group are
# unaffected. src and dst are path names given as strings.
shutil.copy2(src, dest)
# Set the context
if self.selinux_enabled():
context = self.selinux_context(src)
self.set_context_if_different(dest, context, False)
# chown it
try:
dest_stat = os.stat(src)
tmp_stat = os.stat(dest)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(dest, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
# Set the attributes
current_attribs = self.get_file_attributes(src, include_version=False)
current_attribs = current_attribs.get('attr_flags', '')
self.set_attributes_if_different(dest, current_attribs, True)
def atomic_move(self, src, dest, unsafe_writes=False):
        '''atomically move src to dest, copying attributes from dest.
        os.rename is used where possible since it is an atomic operation; the rest of
        the function works around limitations and corner cases, and preserves the
        selinux context where possible.'''
context = None
dest_stat = None
b_src = to_bytes(src, errors='surrogate_or_strict')
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.exists(b_dest):
try:
dest_stat = os.stat(b_dest)
# copy mode and ownership
os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)
# try to copy flags if possible
if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
try:
os.chflags(b_src, dest_stat.st_flags)
except OSError as e:
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and e.errno == getattr(errno, err):
break
else:
raise
except OSError as e:
if e.errno != errno.EPERM:
raise
if self.selinux_enabled():
context = self.selinux_context(dest)
else:
if self.selinux_enabled():
context = self.selinux_default_context(dest)
creating = not os.path.exists(b_dest)
try:
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(b_src, b_dest)
except (IOError, OSError) as e:
if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
                # only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied),
                # 16 (device busy) and 26 (text file busy), which happens on vagrant synced folders and other
                # 'exotic' non posix file systems
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, to_native(e)), exception=traceback.format_exc())
else:
# Use bytes here. In the shippable CI, this fails with
# a UnicodeError with surrogateescape'd strings for an unknown
# reason (doesn't happen in a local Ubuntu16.04 VM)
b_dest_dir = os.path.dirname(b_dest)
b_suffix = os.path.basename(b_dest)
error_msg = None
tmp_dest_name = None
try:
tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=b'.ansible_tmp', dir=b_dest_dir, suffix=b_suffix)
except (OSError, IOError) as e:
error_msg = 'The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), to_native(e))
except TypeError:
# We expect that this is happening because python3.4.x and
# below can't handle byte strings in mkstemp().
# Traceback would end in something like:
# file = _os.path.join(dir, pre + name + suf)
# TypeError: can't concat bytes to str
                    error_msg = ('Failed creating tmp file for atomic move. This usually happens when using a Python3 version older than Python3.5. '
                                 'Please use Python2.x or Python3.5 or greater.')
finally:
if error_msg:
if unsafe_writes:
self._unsafe_writes(b_src, b_dest)
else:
self.fail_json(msg=error_msg, exception=traceback.format_exc())
if tmp_dest_name:
b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
try:
try:
# close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
os.close(tmp_dest_fd)
# leaves tmp file behind when sudo and not root
try:
shutil.move(b_src, b_tmp_dest_name)
except OSError:
# cleanup will happen by 'rm' of tmpdir
# copy2 will preserve some metadata
shutil.copy2(b_src, b_tmp_dest_name)
if self.selinux_enabled():
self.set_context_if_different(
b_tmp_dest_name, context, False)
try:
tmp_stat = os.stat(b_tmp_dest_name)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
try:
os.rename(b_tmp_dest_name, b_dest)
except (shutil.Error, OSError, IOError) as e:
if unsafe_writes and e.errno == errno.EBUSY:
self._unsafe_writes(b_tmp_dest_name, b_dest)
else:
                                    self.fail_json(msg='Unable to move %s into %s, failed final rename from %s: %s' %
(src, dest, b_tmp_dest_name, to_native(e)), exception=traceback.format_exc())
except (shutil.Error, OSError, IOError) as e:
if unsafe_writes:
self._unsafe_writes(b_src, b_dest)
else:
self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, to_native(e)), exception=traceback.format_exc())
finally:
self.cleanup(b_tmp_dest_name)
if creating:
# make sure the file has the correct permissions
# based on the current value of umask
umask = os.umask(0)
os.umask(umask)
os.chmod(b_dest, DEFAULT_PERM & ~umask)
try:
os.chown(b_dest, os.geteuid(), os.getegid())
except OSError:
# We're okay with trying our best here. If the user is not
# root (or old Unices) they won't be able to chown.
pass
if self.selinux_enabled():
# rename might not preserve context
self.set_context_if_different(dest, context, False)
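    # Illustrative write-then-move sketch; the destination path and the
    # tmpfile handling below are hypothetical module-author choices:
    #
    #   fd, tmpfile = tempfile.mkstemp(dir=module.tmpdir)
    #   with os.fdopen(fd, 'wb') as f:
    #       f.write(b'new contents\n')
    #   module.atomic_move(tmpfile, '/etc/example.conf', unsafe_writes=False)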
def _unsafe_writes(self, src, dest):
# sadly there are some situations where we cannot ensure atomicity, but only if
# the user insists and we get the appropriate error we update the file unsafely
try:
out_dest = in_src = None
try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
finally: # assuring closed files in 2.4 compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, to_native(e)),
exception=traceback.format_exc())
def _clean_args(self, args):
if not self._clean:
# create a printable version of the command for use in reporting later,
# which strips out things like passwords from the args list
to_clean_args = args
if PY2:
if isinstance(args, text_type):
to_clean_args = to_bytes(args)
else:
if isinstance(args, binary_type):
to_clean_args = to_text(args)
if isinstance(args, (text_type, binary_type)):
to_clean_args = shlex.split(to_clean_args)
clean_args = []
is_passwd = False
for arg in (to_native(a) for a in to_clean_args):
if is_passwd:
is_passwd = False
clean_args.append('********')
continue
if PASSWD_ARG_RE.match(arg):
sep_idx = arg.find('=')
if sep_idx > -1:
clean_args.append('%s=********' % arg[:sep_idx])
continue
else:
is_passwd = True
arg = heuristic_log_sanitize(arg, self.no_log_values)
clean_args.append(arg)
self._clean = ' '.join(shlex_quote(arg) for arg in clean_args)
return self._clean
def _restore_signal_handlers(self):
# Reset SIGPIPE to SIG_DFL, otherwise in Python2.7 it gets ignored in subprocesses.
if PY2 and sys.platform != 'win32':
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict',
expand_user_and_vars=True, pass_fds=None, before_communicate_callback=None, ignore_invalid_cwd=True, handle_exceptions=True):
'''
Execute a command, returns rc, stdout, and stderr.
:arg args: is the command to run
* If args is a list, the command will be run with shell=False.
            * If args is a string and use_unsafe_shell=False, it will be split into a list and run with shell=False
* If args is a string and use_unsafe_shell=True it runs with shell=True.
:kw check_rc: Whether to call fail_json in case of non zero RC.
Default False
:kw close_fds: See documentation for subprocess.Popen(). Default True
:kw executable: See documentation for subprocess.Popen(). Default None
:kw data: If given, information to write to the stdin of the command
:kw binary_data: If False, append a newline to the data. Default False
:kw path_prefix: If given, additional path to find the command in.
This adds to the PATH environment variable so helper commands in
the same directory can also be found
:kw cwd: If given, working directory to run the command inside
:kw use_unsafe_shell: See `args` parameter. Default False
:kw prompt_regex: Regex string (not a compiled regex) which can be
used to detect prompts in the stdout which would otherwise cause
the execution to hang (especially if no input data is specified)
:kw environ_update: dictionary to *update* environ variables with
:kw umask: Umask to be used when running the command. Default None
:kw encoding: Since we return native strings, on python3 we need to
know the encoding to use to transform from bytes to text. If you
want to always get bytes back, use encoding=None. The default is
"utf-8". This does not affect transformation of strings given as
args.
:kw errors: Since we return native strings, on python3 we need to
transform stdout and stderr from bytes to text. If the bytes are
undecodable in the ``encoding`` specified, then use this error
handler to deal with them. The default is ``surrogate_or_strict``
which means that the bytes will be decoded using the
surrogateescape error handler if available (available on all
python3 versions we support) otherwise a UnicodeError traceback
will be raised. This does not affect transformations of strings
given as args.
:kw expand_user_and_vars: When ``use_unsafe_shell=False`` this argument
dictates whether ``~`` is expanded in paths and environment variables
are expanded before running the command. When ``True`` a string such as
``$SHELL`` will be expanded regardless of escaping. When ``False`` and
``use_unsafe_shell=False`` no path or variable expansion will be done.
:kw pass_fds: When running on Python 3 this argument
dictates which file descriptors should be passed
to an underlying ``Popen`` constructor. On Python 2, this will
set ``close_fds`` to False.
        :kw before_communicate_callback: This function will be called
            after the ``Popen`` object is created
            but before communicating with the process.
            (the ``Popen`` object will be passed to the callback as its first argument)
:kw ignore_invalid_cwd: This flag indicates whether an invalid ``cwd``
(non-existent or not a directory) should be ignored or should raise
an exception.
:kw handle_exceptions: This flag indicates whether an exception will
be handled inline and issue a failed_json or if the caller should
handle it.
:returns: A 3-tuple of return code (integer), stdout (native string),
and stderr (native string). On python2, stdout and stderr are both
byte strings. On python3, stdout and stderr are text strings converted
according to the encoding and errors parameters. If you want byte
strings on python3, use encoding=None to turn decoding to text off.
'''
# used by clean args later on
self._clean = None
if not isinstance(args, (list, binary_type, text_type)):
msg = "Argument 'args' to run_command must be list or string"
self.fail_json(rc=257, cmd=args, msg=msg)
shell = False
if use_unsafe_shell:
# stringify args for unsafe/direct shell usage
if isinstance(args, list):
args = b" ".join([to_bytes(shlex_quote(x), errors='surrogate_or_strict') for x in args])
else:
args = to_bytes(args, errors='surrogate_or_strict')
# not set explicitly, check if set by controller
if executable:
executable = to_bytes(executable, errors='surrogate_or_strict')
args = [executable, b'-c', args]
elif self._shell not in (None, '/bin/sh'):
args = [to_bytes(self._shell, errors='surrogate_or_strict'), b'-c', args]
else:
shell = True
else:
# ensure args are a list
if isinstance(args, (binary_type, text_type)):
# On python2.6 and below, shlex has problems with text type
# On python3, shlex needs a text type.
if PY2:
args = to_bytes(args, errors='surrogate_or_strict')
elif PY3:
args = to_text(args, errors='surrogateescape')
args = shlex.split(args)
# expand ``~`` in paths, and all environment vars
if expand_user_and_vars:
args = [to_bytes(os.path.expanduser(os.path.expandvars(x)), errors='surrogate_or_strict') for x in args if x is not None]
else:
args = [to_bytes(x, errors='surrogate_or_strict') for x in args if x is not None]
prompt_re = None
if prompt_regex:
if isinstance(prompt_regex, text_type):
if PY3:
prompt_regex = to_bytes(prompt_regex, errors='surrogateescape')
elif PY2:
prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict')
try:
prompt_re = re.compile(prompt_regex, re.MULTILINE)
except re.error:
self.fail_json(msg="invalid prompt regular expression given to run_command")
rc = 0
msg = None
st_in = None
env = os.environ.copy()
# We can set this from both an attribute and per call
env.update(self.run_command_environ_update or {})
env.update(environ_update or {})
if path_prefix:
path = env.get('PATH', '')
if path:
env['PATH'] = "%s:%s" % (path_prefix, path)
else:
env['PATH'] = path_prefix
# If using test-module.py and explode, the remote lib path will resemble:
# /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
# If using ansible or ansible-playbook with a remote system:
# /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
# Clean out python paths set by ansiballz
if 'PYTHONPATH' in env:
pypaths = [x for x in env['PYTHONPATH'].split(':')
if x and
not x.endswith('/ansible_modlib.zip') and
not x.endswith('/debug_dir')]
if pypaths and any(pypaths):
env['PYTHONPATH'] = ':'.join(pypaths)
if data:
st_in = subprocess.PIPE
def preexec():
self._restore_signal_handlers()
if umask:
os.umask(umask)
kwargs = dict(
executable=executable,
shell=shell,
close_fds=close_fds,
stdin=st_in,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=preexec,
env=env,
)
if PY3 and pass_fds:
kwargs["pass_fds"] = pass_fds
elif PY2 and pass_fds:
kwargs['close_fds'] = False
# make sure we're in the right working directory
if cwd:
cwd = to_bytes(os.path.abspath(os.path.expanduser(cwd)), errors='surrogate_or_strict')
if os.path.isdir(cwd):
kwargs['cwd'] = cwd
elif not ignore_invalid_cwd:
self.fail_json(msg="Provided cwd is not a valid directory: %s" % cwd)
try:
if self._debug:
self.log('Executing: ' + self._clean_args(args))
cmd = subprocess.Popen(args, **kwargs)
if before_communicate_callback:
before_communicate_callback(cmd)
# the communication logic here is essentially taken from that
# of the _communicate() function in ssh.py
stdout = b''
stderr = b''
try:
selector = selectors.DefaultSelector()
except (IOError, OSError):
# Failed to detect default selector for the given platform
# Select PollSelector which is supported by major platforms
selector = selectors.PollSelector()
selector.register(cmd.stdout, selectors.EVENT_READ)
selector.register(cmd.stderr, selectors.EVENT_READ)
if os.name == 'posix':
fcntl.fcntl(cmd.stdout.fileno(), fcntl.F_SETFL, fcntl.fcntl(cmd.stdout.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
fcntl.fcntl(cmd.stderr.fileno(), fcntl.F_SETFL, fcntl.fcntl(cmd.stderr.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
if data:
if not binary_data:
data += '\n'
if isinstance(data, text_type):
data = to_bytes(data)
cmd.stdin.write(data)
cmd.stdin.close()
while True:
events = selector.select(1)
for key, event in events:
b_chunk = key.fileobj.read()
if b_chunk == b(''):
selector.unregister(key.fileobj)
if key.fileobj == cmd.stdout:
stdout += b_chunk
elif key.fileobj == cmd.stderr:
stderr += b_chunk
# if we're checking for prompts, do it now
if prompt_re:
if prompt_re.search(stdout) and not data:
if encoding:
stdout = to_native(stdout, encoding=encoding, errors=errors)
return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
# only break out if no pipes are left to read or
# the pipes are completely read and
# the process is terminated
if (not events or not selector.get_map()) and cmd.poll() is not None:
break
# No pipes are left to read but process is not yet terminated
# Only then it is safe to wait for the process to be finished
# NOTE: Actually cmd.poll() is always None here if no selectors are left
elif not selector.get_map() and cmd.poll() is None:
cmd.wait()
# The process is terminated. Since no pipes to read from are
# left, there is no need to call select() again.
break
cmd.stdout.close()
cmd.stderr.close()
selector.close()
rc = cmd.returncode
except (OSError, IOError) as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(e)))
if handle_exceptions:
self.fail_json(rc=e.errno, stdout=b'', stderr=b'', msg=to_native(e), cmd=self._clean_args(args))
else:
raise e
except Exception as e:
self.log("Error Executing CMD:%s Exception:%s" % (self._clean_args(args), to_native(traceback.format_exc())))
if handle_exceptions:
self.fail_json(rc=257, stdout=b'', stderr=b'', msg=to_native(e), exception=traceback.format_exc(), cmd=self._clean_args(args))
else:
raise e
if rc != 0 and check_rc:
msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
self.fail_json(cmd=self._clean_args(args), rc=rc, stdout=stdout, stderr=stderr, msg=msg)
if encoding is not None:
return (rc, to_native(stdout, encoding=encoding, errors=errors),
to_native(stderr, encoding=encoding, errors=errors))
return (rc, stdout, stderr)
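    # Illustrative usage sketch ('/usr/bin/uname' is an arbitrary example
    # command, not something this method requires):
    #
    #   rc, out, err = module.run_command(['/usr/bin/uname', '-r'])
    #   if rc != 0:
    #       module.fail_json(msg='uname failed', rc=rc, stderr=err)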
def append_to_file(self, filename, str):
filename = os.path.expandvars(os.path.expanduser(filename))
fh = open(filename, 'a')
fh.write(str)
fh.close()
def bytes_to_human(self, size):
return bytes_to_human(size)
# for backwards compatibility
pretty_bytes = bytes_to_human
def human_to_bytes(self, number, isbits=False):
return human_to_bytes(number, isbits)
#
# Backwards compat
#
# In 2.0, moved from inside the module to the toplevel
is_executable = is_executable
@staticmethod
def get_buffer_size(fd):
try:
            # 1032 == F_GETPIPE_SZ (Linux)
buffer_size = fcntl.fcntl(fd, 1032)
except Exception:
try:
# not as exact as above, but should be good enough for most platforms that fail the previous call
buffer_size = select.PIPE_BUF
except Exception:
                buffer_size = 9000  # use a sane default just in case
return buffer_size
def get_module_path():
return os.path.dirname(os.path.realpath(__file__))
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,560 |
`INTERPRETER_PYTHON_DISTRO_MAP` YAML anchors maintainability
|
### Summary
After looking over some recent PRs to add additional aliases to `rhelish`, I'm beginning to believe that using YAML anchors isn't the right thing for long term maintainability.
I think we should add a step in the Python code that uses the distro family map from `module_utils.facts` to select the "rhelish" option from `INTERPRETER_PYTHON_DISTRO_MAP` unless a specific distro entry exists.
This will help us maintain 1 list, and limit the number of changes we need to make to the config to handle new distros.
### Issue Type
Feature Idea
### Component Name
lib/ansible/executor/interpreter_discovery.py
### Additional Information
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75560
|
https://github.com/ansible/ansible/pull/76815
|
cfe4bdc1b9a91f043fa23f78478e0c20f05175ae
|
d7d1bd6269fa463a8ac030047c285fc3677ad8a0
| 2021-08-24T15:13:05Z |
python
| 2022-01-21T17:46:45Z |
changelogs/fragments/75560-interp-discovery-family-fallback.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,560 |
`INTERPRETER_PYTHON_DISTRO_MAP` YAML anchors maintainability
|
### Summary
After looking over some recent PRs to add additional aliases to `rhelish`, I'm beginning to believe that using YAML anchors isn't the right thing for long term maintainability.
I think we should add a step in the Python code that uses the distro family map from `module_utils.facts` to select the "rhelish" option from `INTERPRETER_PYTHON_DISTRO_MAP` unless a specific distro entry exists.
This will help us maintain 1 list, and limit the number of changes we need to make to the config to handle new distros.
### Issue Type
Feature Idea
### Component Name
lib/ansible/executor/interpreter_discovery.py
### Additional Information
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75560
|
https://github.com/ansible/ansible/pull/76815
|
cfe4bdc1b9a91f043fa23f78478e0c20f05175ae
|
d7d1bd6269fa463a8ac030047c285fc3677ad8a0
| 2021-08-24T15:13:05Z |
python
| 2022-01-21T17:46:45Z |
lib/ansible/config/base.yml
|
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
---
ALLOW_WORLD_READABLE_TMPFILES:
name: Allow world-readable temporary files
description:
- This setting has been moved to the individual shell plugins as a plugin option :ref:`shell_plugins`.
- The existing configuration settings are still accepted with the shell plugin adding additional options, like variables.
- This message will be removed in 2.14.
type: boolean
default: False
deprecated: # (kept for autodetection and removal, deprecation is irrelevant since w/o settings this can never show runtime msg)
why: moved to shell plugins
version: "2.14"
alternatives: 'world_readable_tmp'
ANSIBLE_CONNECTION_PATH:
name: Path of ansible-connection script
default: null
description:
- Specify where to look for the ansible-connection script. This location will be checked before searching $PATH.
- If null, ansible will start with the same directory as the ansible script.
type: path
env: [{name: ANSIBLE_CONNECTION_PATH}]
ini:
- {key: ansible_connection_path, section: persistent_connection}
yaml: {key: persistent_connection.ansible_connection_path}
version_added: "2.8"
ANSIBLE_COW_SELECTION:
name: Cowsay filter selection
default: default
  description: This allows you to choose a specific cowsay stencil for the banners or use 'random' to cycle through them.
env: [{name: ANSIBLE_COW_SELECTION}]
ini:
- {key: cow_selection, section: defaults}
ANSIBLE_COW_ACCEPTLIST:
name: Cowsay filter acceptance list
default: ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'dragon', 'elephant-in-snake', 'elephant', 'eyes', 'hellokitty', 'kitty', 'luke-koala', 'meow', 'milk', 'moofasa', 'moose', 'ren', 'sheep', 'small', 'stegosaurus', 'stimpy', 'supermilker', 'three-eyes', 'turkey', 'turtle', 'tux', 'udder', 'vader-koala', 'vader', 'www']
  description: Accept list of cowsay templates that are 'safe' to use; set to an empty list if you want to enable all installed templates.
env:
- name: ANSIBLE_COW_WHITELIST
deprecated:
why: normalizing names to new standard
version: "2.15"
alternatives: 'ANSIBLE_COW_ACCEPTLIST'
- name: ANSIBLE_COW_ACCEPTLIST
version_added: '2.11'
ini:
- key: cow_whitelist
section: defaults
deprecated:
why: normalizing names to new standard
version: "2.15"
alternatives: 'cowsay_enabled_stencils'
- key: cowsay_enabled_stencils
section: defaults
version_added: '2.11'
type: list
ANSIBLE_FORCE_COLOR:
name: Force color output
default: False
description: This option forces color mode even when running without a TTY or the "nocolor" setting is True.
env: [{name: ANSIBLE_FORCE_COLOR}]
ini:
- {key: force_color, section: defaults}
type: boolean
yaml: {key: display.force_color}
ANSIBLE_NOCOLOR:
name: Suppress color output
default: False
description: This setting allows suppressing colorizing output, which is used to give a better indication of failure and status information.
env:
- name: ANSIBLE_NOCOLOR
# this is generic convention for CLI programs
- name: NO_COLOR
version_added: '2.11'
ini:
- {key: nocolor, section: defaults}
type: boolean
yaml: {key: display.nocolor}
ANSIBLE_NOCOWS:
name: Suppress cowsay output
default: False
description: If you have cowsay installed but want to avoid the 'cows' (why????), use this.
env: [{name: ANSIBLE_NOCOWS}]
ini:
- {key: nocows, section: defaults}
type: boolean
yaml: {key: display.i_am_no_fun}
ANSIBLE_COW_PATH:
name: Set path to cowsay command
default: null
description: Specify a custom cowsay path or swap in your cowsay implementation of choice
env: [{name: ANSIBLE_COW_PATH}]
ini:
- {key: cowpath, section: defaults}
type: string
yaml: {key: display.cowpath}
ANSIBLE_PIPELINING:
name: Connection pipelining
default: False
description:
- This is a global option, each connection plugin can override either by having more specific options or not supporting pipelining at all.
- Pipelining, if supported by the connection plugin, reduces the number of network operations required to execute a module on the remote server,
by executing many Ansible modules without actual file transfer.
- It can result in a very significant performance improvement when enabled.
- "However this conflicts with privilege escalation (become). For example, when using 'sudo:' operations you must first
disable 'requiretty' in /etc/sudoers on all managed hosts, which is why it is disabled by default."
- This setting will be disabled if ``ANSIBLE_KEEP_REMOTE_FILES`` is enabled.
env:
- name: ANSIBLE_PIPELINING
ini:
- section: defaults
key: pipelining
- section: connection
key: pipelining
type: boolean
ANY_ERRORS_FATAL:
name: Make Task failures fatal
default: False
description: Sets the default value for the any_errors_fatal keyword, if True, Task failures will be considered fatal errors.
env:
- name: ANSIBLE_ANY_ERRORS_FATAL
ini:
- section: defaults
key: any_errors_fatal
type: boolean
yaml: {key: errors.any_task_errors_fatal}
version_added: "2.4"
BECOME_ALLOW_SAME_USER:
name: Allow becoming the same user
default: False
description:
    - This setting controls if become is skipped when remote user and become user are the same, i.e. root sudo to root.
env: [{name: ANSIBLE_BECOME_ALLOW_SAME_USER}]
ini:
- {key: become_allow_same_user, section: privilege_escalation}
type: boolean
yaml: {key: privilege_escalation.become_allow_same_user}
BECOME_PASSWORD_FILE:
name: Become password file
default: ~
description:
- 'The password file to use for the become plugin. --become-password-file.'
- If executable, it will be run and the resulting stdout will be used as the password.
env: [{name: ANSIBLE_BECOME_PASSWORD_FILE}]
ini:
- {key: become_password_file, section: defaults}
type: path
version_added: '2.12'
AGNOSTIC_BECOME_PROMPT:
name: Display an agnostic become prompt
default: True
type: boolean
description: Display an agnostic become prompt instead of displaying a prompt containing the command line supplied become method
env: [{name: ANSIBLE_AGNOSTIC_BECOME_PROMPT}]
ini:
- {key: agnostic_become_prompt, section: privilege_escalation}
yaml: {key: privilege_escalation.agnostic_become_prompt}
version_added: "2.5"
CACHE_PLUGIN:
name: Persistent Cache plugin
default: memory
description: Chooses which cache plugin to use, the default 'memory' is ephemeral.
env: [{name: ANSIBLE_CACHE_PLUGIN}]
ini:
- {key: fact_caching, section: defaults}
yaml: {key: facts.cache.plugin}
CACHE_PLUGIN_CONNECTION:
name: Cache Plugin URI
default: ~
description: Defines connection or path information for the cache plugin
env: [{name: ANSIBLE_CACHE_PLUGIN_CONNECTION}]
ini:
- {key: fact_caching_connection, section: defaults}
yaml: {key: facts.cache.uri}
CACHE_PLUGIN_PREFIX:
name: Cache Plugin table prefix
default: ansible_facts
description: Prefix to use for cache plugin files/tables
env: [{name: ANSIBLE_CACHE_PLUGIN_PREFIX}]
ini:
- {key: fact_caching_prefix, section: defaults}
yaml: {key: facts.cache.prefix}
CACHE_PLUGIN_TIMEOUT:
name: Cache Plugin expiration timeout
default: 86400
description: Expiration timeout for the cache plugin data
env: [{name: ANSIBLE_CACHE_PLUGIN_TIMEOUT}]
ini:
- {key: fact_caching_timeout, section: defaults}
type: integer
yaml: {key: facts.cache.timeout}
COLLECTIONS_SCAN_SYS_PATH:
name: Scan PYTHONPATH for installed collections
description: A boolean to enable or disable scanning the sys.path for installed collections
default: true
type: boolean
env:
- {name: ANSIBLE_COLLECTIONS_SCAN_SYS_PATH}
ini:
- {key: collections_scan_sys_path, section: defaults}
COLLECTIONS_PATHS:
name: ordered list of root paths for loading installed Ansible collections content
description: >
Colon separated paths in which Ansible will search for collections content.
Collections must be in nested *subdirectories*, not directly in these directories.
For example, if ``COLLECTIONS_PATHS`` includes ``~/.ansible/collections``,
and you want to add ``my.collection`` to that directory, it must be saved as
``~/.ansible/collections/ansible_collections/my/collection``.
default: ~/.ansible/collections:/usr/share/ansible/collections
type: pathspec
env:
- name: ANSIBLE_COLLECTIONS_PATHS # TODO: Deprecate this and ini once PATH has been in a few releases.
- name: ANSIBLE_COLLECTIONS_PATH
version_added: '2.10'
ini:
- key: collections_paths
section: defaults
- key: collections_path
section: defaults
version_added: '2.10'
COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH:
name: Defines behavior when loading a collection that does not support the current Ansible version
description:
- When a collection is loaded that does not support the running Ansible version (via the collection metadata key
`requires_ansible`), the default behavior is to issue a warning and continue anyway. Setting this value to `ignore`
skips the warning entirely, while setting it to `error` will immediately halt Ansible execution.
env: [{name: ANSIBLE_COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH}]
ini: [{key: collections_on_ansible_version_mismatch, section: defaults}]
choices: [error, warning, ignore]
default: warning
_COLOR_DEFAULTS: &color
name: placeholder for color settings' defaults
choices: ['black', 'bright gray', 'blue', 'white', 'green', 'bright blue', 'cyan', 'bright green', 'red', 'bright cyan', 'purple', 'bright red', 'yellow', 'bright purple', 'dark gray', 'bright yellow', 'magenta', 'bright magenta', 'normal']
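# Note on the pattern used below: every COLOR_* entry reuses the `choices` list
# from the &color anchor above via the YAML merge key (`<<: *color`) and only
# overrides its own fields. A hypothetical new entry (COLOR_EXAMPLE is
# illustrative, not a real setting) would look like:
#   COLOR_EXAMPLE:
#     <<: *color
#     name: Color for example output
#     default: green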
COLOR_CHANGED:
<<: *color
name: Color for 'changed' task status
default: yellow
description: Defines the color to use on 'Changed' task status
env: [{name: ANSIBLE_COLOR_CHANGED}]
ini:
- {key: changed, section: colors}
COLOR_CONSOLE_PROMPT:
<<: *color
name: "Color for ansible-console's prompt task status"
default: white
description: Defines the default color to use for ansible-console
env: [{name: ANSIBLE_COLOR_CONSOLE_PROMPT}]
ini:
- {key: console_prompt, section: colors}
version_added: "2.7"
COLOR_DEBUG:
<<: *color
name: Color for debug statements
default: dark gray
description: Defines the color to use when emitting debug messages
env: [{name: ANSIBLE_COLOR_DEBUG}]
ini:
- {key: debug, section: colors}
COLOR_DEPRECATE:
<<: *color
name: Color for deprecation messages
default: purple
description: Defines the color to use when emitting deprecation messages
env: [{name: ANSIBLE_COLOR_DEPRECATE}]
ini:
- {key: deprecate, section: colors}
COLOR_DIFF_ADD:
<<: *color
name: Color for diff added display
default: green
description: Defines the color to use when showing added lines in diffs
env: [{name: ANSIBLE_COLOR_DIFF_ADD}]
ini:
- {key: diff_add, section: colors}
yaml: {key: display.colors.diff.add}
COLOR_DIFF_LINES:
<<: *color
name: Color for diff lines display
default: cyan
description: Defines the color to use when showing diffs
env: [{name: ANSIBLE_COLOR_DIFF_LINES}]
ini:
- {key: diff_lines, section: colors}
COLOR_DIFF_REMOVE:
<<: *color
name: Color for diff removed display
default: red
description: Defines the color to use when showing removed lines in diffs
env: [{name: ANSIBLE_COLOR_DIFF_REMOVE}]
ini:
- {key: diff_remove, section: colors}
COLOR_ERROR:
<<: *color
name: Color for error messages
default: red
description: Defines the color to use when emitting error messages
env: [{name: ANSIBLE_COLOR_ERROR}]
ini:
- {key: error, section: colors}
yaml: {key: colors.error}
COLOR_HIGHLIGHT:
<<: *color
name: Color for highlighting
default: white
description: Defines the color to use for highlighting
env: [{name: ANSIBLE_COLOR_HIGHLIGHT}]
ini:
- {key: highlight, section: colors}
COLOR_OK:
<<: *color
name: Color for 'ok' task status
default: green
description: Defines the color to use when showing 'OK' task status
env: [{name: ANSIBLE_COLOR_OK}]
ini:
- {key: ok, section: colors}
COLOR_SKIP:
<<: *color
name: Color for 'skip' task status
default: cyan
description: Defines the color to use when showing 'Skipped' task status
env: [{name: ANSIBLE_COLOR_SKIP}]
ini:
- {key: skip, section: colors}
COLOR_UNREACHABLE:
<<: *color
name: Color for 'unreachable' host state
default: bright red
description: Defines the color to use on 'Unreachable' status
env: [{name: ANSIBLE_COLOR_UNREACHABLE}]
ini:
- {key: unreachable, section: colors}
COLOR_VERBOSE:
<<: *color
name: Color for verbose messages
default: blue
  description: Defines the color to use when emitting verbose messages, i.e. those that show with '-v's.
env: [{name: ANSIBLE_COLOR_VERBOSE}]
ini:
- {key: verbose, section: colors}
COLOR_WARN:
<<: *color
name: Color for warning messages
default: bright purple
description: Defines the color to use when emitting warning messages
env: [{name: ANSIBLE_COLOR_WARN}]
ini:
- {key: warn, section: colors}
CONNECTION_PASSWORD_FILE:
name: Connection password file
default: ~
description: 'The password file to use for the connection plugin. --connection-password-file.'
env: [{name: ANSIBLE_CONNECTION_PASSWORD_FILE}]
ini:
- {key: connection_password_file, section: defaults}
type: path
version_added: '2.12'
COVERAGE_REMOTE_OUTPUT:
name: Sets the output directory and filename prefix to generate coverage run info.
description:
- Sets the output directory on the remote host to generate coverage reports to.
- Currently only used for remote coverage on PowerShell modules.
- This is for internal use only.
env:
- {name: _ANSIBLE_COVERAGE_REMOTE_OUTPUT}
vars:
- {name: _ansible_coverage_remote_output}
type: str
version_added: '2.9'
COVERAGE_REMOTE_PATHS:
name: Sets the list of paths to run coverage for.
description:
- A list of paths for files on the Ansible controller to run coverage for when executing on the remote host.
- Only files that match the path glob will have its coverage collected.
- Multiple path globs can be specified and are separated by ``:``.
- Currently only used for remote coverage on PowerShell modules.
- This is for internal use only.
default: '*'
env:
- {name: _ANSIBLE_COVERAGE_REMOTE_PATH_FILTER}
type: str
version_added: '2.9'
ACTION_WARNINGS:
name: Toggle action warnings
default: True
description:
- By default Ansible will issue a warning when received from a task action (module or action plugin)
- These warnings can be silenced by adjusting this setting to False.
env: [{name: ANSIBLE_ACTION_WARNINGS}]
ini:
- {key: action_warnings, section: defaults}
type: boolean
version_added: "2.5"
COMMAND_WARNINGS:
name: Command module warnings
default: False
description:
- Ansible can issue a warning when the shell or command module is used and the command appears to be similar to an existing Ansible module.
- These warnings can be silenced by adjusting this setting to False. You can also control this at the task level with the module option ``warn``.
- As of version 2.11, this is disabled by default.
env: [{name: ANSIBLE_COMMAND_WARNINGS}]
ini:
- {key: command_warnings, section: defaults}
type: boolean
version_added: "1.8"
deprecated:
why: the command warnings feature is being removed
version: "2.14"
LOCALHOST_WARNING:
name: Warning when using implicit inventory with only localhost
default: True
description:
- By default Ansible will issue a warning when there are no hosts in the
inventory.
- These warnings can be silenced by adjusting this setting to False.
env: [{name: ANSIBLE_LOCALHOST_WARNING}]
ini:
- {key: localhost_warning, section: defaults}
type: boolean
version_added: "2.6"
DOC_FRAGMENT_PLUGIN_PATH:
name: documentation fragment plugins path
default: ~/.ansible/plugins/doc_fragments:/usr/share/ansible/plugins/doc_fragments
description: Colon separated paths in which Ansible will search for Documentation Fragments Plugins.
env: [{name: ANSIBLE_DOC_FRAGMENT_PLUGINS}]
ini:
- {key: doc_fragment_plugins, section: defaults}
type: pathspec
DEFAULT_ACTION_PLUGIN_PATH:
name: Action plugins path
default: ~/.ansible/plugins/action:/usr/share/ansible/plugins/action
description: Colon separated paths in which Ansible will search for Action Plugins.
env: [{name: ANSIBLE_ACTION_PLUGINS}]
ini:
- {key: action_plugins, section: defaults}
type: pathspec
yaml: {key: plugins.action.path}
DEFAULT_ALLOW_UNSAFE_LOOKUPS:
name: Allow unsafe lookups
default: False
description:
- "When enabled, this option allows lookup plugins (whether used in variables as ``{{lookup('foo')}}`` or as a loop as with_foo)
to return data that is not marked 'unsafe'."
- By default, such data is marked as unsafe to prevent the templating engine from evaluating any jinja2 templating language,
as this could represent a security risk. This option is provided to allow for backward compatibility,
however users should first consider adding allow_unsafe=True to any lookups which may be expected to contain data which may be run
      through the templating engine later.
env: []
ini:
- {key: allow_unsafe_lookups, section: defaults}
type: boolean
version_added: "2.2.3"
DEFAULT_ASK_PASS:
name: Ask for the login password
default: False
description:
- This controls whether an Ansible playbook should prompt for a login password.
If using SSH keys for authentication, you probably do not need to change this setting.
env: [{name: ANSIBLE_ASK_PASS}]
ini:
- {key: ask_pass, section: defaults}
type: boolean
yaml: {key: defaults.ask_pass}
DEFAULT_ASK_VAULT_PASS:
name: Ask for the vault password(s)
default: False
description:
- This controls whether an Ansible playbook should prompt for a vault password.
env: [{name: ANSIBLE_ASK_VAULT_PASS}]
ini:
- {key: ask_vault_pass, section: defaults}
type: boolean
DEFAULT_BECOME:
name: Enable privilege escalation (become)
default: False
description: Toggles the use of privilege escalation, allowing you to 'become' another user after login.
env: [{name: ANSIBLE_BECOME}]
ini:
- {key: become, section: privilege_escalation}
type: boolean
DEFAULT_BECOME_ASK_PASS:
name: Ask for the privilege escalation (become) password
default: False
description: Toggle to prompt for privilege escalation password.
env: [{name: ANSIBLE_BECOME_ASK_PASS}]
ini:
- {key: become_ask_pass, section: privilege_escalation}
type: boolean
DEFAULT_BECOME_METHOD:
name: Choose privilege escalation method
default: 'sudo'
description: Privilege escalation method to use when `become` is enabled.
env: [{name: ANSIBLE_BECOME_METHOD}]
ini:
- {section: privilege_escalation, key: become_method}
DEFAULT_BECOME_EXE:
name: Choose 'become' executable
default: ~
description: 'executable to use for privilege escalation, otherwise Ansible will depend on PATH'
env: [{name: ANSIBLE_BECOME_EXE}]
ini:
- {key: become_exe, section: privilege_escalation}
DEFAULT_BECOME_FLAGS:
name: Set 'become' executable options
default: ~
description: Flags to pass to the privilege escalation executable.
env: [{name: ANSIBLE_BECOME_FLAGS}]
ini:
- {key: become_flags, section: privilege_escalation}
BECOME_PLUGIN_PATH:
name: Become plugins path
default: ~/.ansible/plugins/become:/usr/share/ansible/plugins/become
description: Colon separated paths in which Ansible will search for Become Plugins.
env: [{name: ANSIBLE_BECOME_PLUGINS}]
ini:
- {key: become_plugins, section: defaults}
type: pathspec
version_added: "2.8"
DEFAULT_BECOME_USER:
# FIXME: should really be blank and make -u passing optional depending on it
name: Set the user you 'become' via privilege escalation
default: root
description: The user your login/remote user 'becomes' when using privilege escalation, most systems will use 'root' when no user is specified.
env: [{name: ANSIBLE_BECOME_USER}]
ini:
- {key: become_user, section: privilege_escalation}
yaml: {key: become.user}
DEFAULT_CACHE_PLUGIN_PATH:
name: Cache Plugins Path
default: ~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache
description: Colon separated paths in which Ansible will search for Cache Plugins.
env: [{name: ANSIBLE_CACHE_PLUGINS}]
ini:
- {key: cache_plugins, section: defaults}
type: pathspec
DEFAULT_CALLBACK_PLUGIN_PATH:
name: Callback Plugins Path
default: ~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback
description: Colon separated paths in which Ansible will search for Callback Plugins.
env: [{name: ANSIBLE_CALLBACK_PLUGINS}]
ini:
- {key: callback_plugins, section: defaults}
type: pathspec
yaml: {key: plugins.callback.path}
CALLBACKS_ENABLED:
name: Enable callback plugins that require it.
default: []
description:
- "List of enabled callbacks, not all callbacks need enabling,
but many of those shipped with Ansible do as we don't want them activated by default."
env:
- name: ANSIBLE_CALLBACK_WHITELIST
deprecated:
why: normalizing names to new standard
version: "2.15"
alternatives: 'ANSIBLE_CALLBACKS_ENABLED'
- name: ANSIBLE_CALLBACKS_ENABLED
version_added: '2.11'
ini:
- key: callback_whitelist
section: defaults
deprecated:
why: normalizing names to new standard
version: "2.15"
alternatives: 'callbacks_enabled'
- key: callbacks_enabled
section: defaults
version_added: '2.11'
type: list
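# For example, enabling an opt-in callback from ansible.cfg (the plugin name is
# illustrative):
#   [defaults]
#   callbacks_enabled = ansible.posix.profile_tasks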
DEFAULT_CLICONF_PLUGIN_PATH:
name: Cliconf Plugins Path
default: ~/.ansible/plugins/cliconf:/usr/share/ansible/plugins/cliconf
description: Colon separated paths in which Ansible will search for Cliconf Plugins.
env: [{name: ANSIBLE_CLICONF_PLUGINS}]
ini:
- {key: cliconf_plugins, section: defaults}
type: pathspec
DEFAULT_CONNECTION_PLUGIN_PATH:
name: Connection Plugins Path
default: ~/.ansible/plugins/connection:/usr/share/ansible/plugins/connection
description: Colon separated paths in which Ansible will search for Connection Plugins.
env: [{name: ANSIBLE_CONNECTION_PLUGINS}]
ini:
- {key: connection_plugins, section: defaults}
type: pathspec
yaml: {key: plugins.connection.path}
DEFAULT_DEBUG:
name: Debug mode
default: False
description:
- "Toggles debug output in Ansible. This is *very* verbose and can hinder
multiprocessing. Debug output can also include secret information
despite no_log settings being enabled, which means debug mode should not be used in
production."
env: [{name: ANSIBLE_DEBUG}]
ini:
- {key: debug, section: defaults}
type: boolean
DEFAULT_EXECUTABLE:
name: Target shell executable
default: /bin/sh
description:
- "This indicates the command to use to spawn a shell under for Ansible's execution needs on a target.
Users may need to change this in rare instances when shell usage is constrained, but in most cases it may be left as is."
env: [{name: ANSIBLE_EXECUTABLE}]
ini:
- {key: executable, section: defaults}
DEFAULT_FACT_PATH:
name: local fact path
description:
- "This option allows you to globally configure a custom path for 'local_facts' for the implied M(ansible.builtin.setup) task when using fact gathering."
- "If not set, it will fallback to the default from the M(ansible.builtin.setup) module: ``/etc/ansible/facts.d``."
- "This does **not** affect user defined tasks that use the M(ansible.builtin.setup) module."
- The real action being created by the implicit task is currently M(ansible.legacy.gather_facts), which then calls the configured fact modules,
by default this will be M(ansible.builtin.setup) for POSIX systems but other platforms might have different defaults.
env: [{name: ANSIBLE_FACT_PATH}]
ini:
- {key: fact_path, section: defaults}
type: string
# TODO: deprecate in 2.14
#deprecated:
# # TODO: when removing set playbook/play.py to default=None
# why: the module_defaults keyword is a more generic version and can apply to all calls to the
# M(ansible.builtin.gather_facts) or M(ansible.builtin.setup) actions
# version: "2.18"
# alternatives: module_defaults
DEFAULT_FILTER_PLUGIN_PATH:
name: Jinja2 Filter Plugins Path
default: ~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter
description: Colon separated paths in which Ansible will search for Jinja2 Filter Plugins.
env: [{name: ANSIBLE_FILTER_PLUGINS}]
ini:
- {key: filter_plugins, section: defaults}
type: pathspec
DEFAULT_FORCE_HANDLERS:
name: Force handlers to run after failure
default: False
description:
- This option controls if notified handlers run on a host even if a failure occurs on that host.
- When false, the handlers will not run if a failure has occurred on a host.
- This can also be set per play or on the command line. See Handlers and Failure for more details.
env: [{name: ANSIBLE_FORCE_HANDLERS}]
ini:
- {key: force_handlers, section: defaults}
type: boolean
version_added: "1.9.1"
DEFAULT_FORKS:
name: Number of task forks
default: 5
description: Maximum number of forks Ansible will use to execute tasks on target hosts.
env: [{name: ANSIBLE_FORKS}]
ini:
- {key: forks, section: defaults}
type: integer
DEFAULT_GATHERING:
name: Gathering behaviour
default: 'implicit'
description:
- This setting controls the default policy of fact gathering (facts discovered about remote systems).
- "When 'implicit' (the default), the cache plugin will be ignored and facts will be gathered per play unless 'gather_facts: False' is set."
- "When 'explicit' the inverse is true, facts will not be gathered unless directly requested in the play."
- "The 'smart' value means each new host that has no facts discovered will be scanned,
but if the same host is addressed in multiple plays it will not be contacted again in the playbook run."
- "This option can be useful for those wishing to save fact gathering time. Both 'smart' and 'explicit' will use the cache plugin."
env: [{name: ANSIBLE_GATHERING}]
ini:
- key: gathering
section: defaults
version_added: "1.6"
choices: ['smart', 'explicit', 'implicit']
DEFAULT_GATHER_SUBSET:
name: Gather facts subset
description:
- Set the `gather_subset` option for the M(ansible.builtin.setup) task in the implicit fact gathering.
See the module documentation for specifics.
- "It does **not** apply to user defined M(ansible.builtin.setup) tasks."
env: [{name: ANSIBLE_GATHER_SUBSET}]
ini:
- key: gather_subset
section: defaults
version_added: "2.1"
type: list
# TODO: deprecate in 2.14
#deprecated:
# # TODO: when removing set playbook/play.py to default=None
# why: the module_defaults keyword is a more generic version and can apply to all calls to the
# M(ansible.builtin.gather_facts) or M(ansible.builtin.setup) actions
# version: "2.18"
# alternatives: module_defaults
DEFAULT_GATHER_TIMEOUT:
name: Gather facts timeout
description:
- Set the timeout in seconds for the implicit fact gathering, see the module documentation for specifics.
- "It does **not** apply to user defined M(ansible.builtin.setup) tasks."
env: [{name: ANSIBLE_GATHER_TIMEOUT}]
ini:
- {key: gather_timeout, section: defaults}
type: integer
# TODO: deprecate in 2.14
#deprecated:
# # TODO: when removing set playbook/play.py to default=None
# why: the module_defaults keyword is a more generic version and can apply to all calls to the
# M(ansible.builtin.gather_facts) or M(ansible.builtin.setup) actions
# version: "2.18"
# alternatives: module_defaults
DEFAULT_HASH_BEHAVIOUR:
name: Hash merge behaviour
default: replace
type: string
choices:
replace: Any variable that is defined more than once is overwritten using the order from variable precedence rules (highest wins).
merge: Any dictionary variable will be recursively merged with new definitions across the different variable definition sources.
description:
- This setting controls how duplicate definitions of dictionary variables (aka hash, map, associative array) are handled in Ansible.
- This does not affect variables whose values are scalars (integers, strings) or arrays.
- "**WARNING**, changing this setting is not recommended as this is fragile and makes your content (plays, roles, collections) non portable,
leading to continual confusion and misuse. Don't change this setting unless you think you have an absolute need for it."
- We recommend avoiding reusing variable names and relying on the ``combine`` filter and ``vars`` and ``varnames`` lookups
to create merged versions of the individual variables. In our experience this is rarely really needed and a sign that too much
complexity has been introduced into the data structures and plays.
- For some uses you can also look into custom vars_plugins to merge on input, even substituting the default ``host_group_vars``
that is in charge of parsing the ``host_vars/`` and ``group_vars/`` directories. Most users of this setting are only interested in inventory scope,
but the setting itself affects all sources and makes debugging even harder.
- All playbooks and roles in the official examples repos assume the default for this setting.
- Changing the setting to ``merge`` applies across variable sources, but many sources will internally still overwrite the variables.
For example ``include_vars`` will dedupe variables internally before updating Ansible, with 'last defined' overwriting previous definitions in same file.
    - The Ansible project recommends you **avoid ``merge`` for new projects.**
    - It is the intention of the Ansible developers to eventually deprecate and remove this setting, but it is being kept as some users do heavily rely on it.
env: [{name: ANSIBLE_HASH_BEHAVIOUR}]
ini:
- {key: hash_behaviour, section: defaults}
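# For example, the recommended alternative to 'merge' builds the merged view
# explicitly in the play (variable names are illustrative):
#   merged_conf: "{{ base_conf | combine(override_conf, recursive=True) }}"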
DEFAULT_HOST_LIST:
name: Inventory Source
default: /etc/ansible/hosts
description: Comma separated list of Ansible inventory sources
env:
- name: ANSIBLE_INVENTORY
expand_relative_paths: True
ini:
- key: inventory
section: defaults
type: pathlist
yaml: {key: defaults.inventory}
DEFAULT_HTTPAPI_PLUGIN_PATH:
name: HttpApi Plugins Path
default: ~/.ansible/plugins/httpapi:/usr/share/ansible/plugins/httpapi
description: Colon separated paths in which Ansible will search for HttpApi Plugins.
env: [{name: ANSIBLE_HTTPAPI_PLUGINS}]
ini:
- {key: httpapi_plugins, section: defaults}
type: pathspec
DEFAULT_INTERNAL_POLL_INTERVAL:
name: Internal poll interval
default: 0.001
env: []
ini:
- {key: internal_poll_interval, section: defaults}
type: float
version_added: "2.2"
description:
- This sets the interval (in seconds) of Ansible internal processes polling each other.
Lower values improve performance with large playbooks at the expense of extra CPU load.
Higher values are more suitable for Ansible usage in automation scenarios,
when UI responsiveness is not required but CPU usage might be a concern.
- "The default corresponds to the value hardcoded in Ansible <= 2.1"
DEFAULT_INVENTORY_PLUGIN_PATH:
name: Inventory Plugins Path
default: ~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory
description: Colon separated paths in which Ansible will search for Inventory Plugins.
env: [{name: ANSIBLE_INVENTORY_PLUGINS}]
ini:
- {key: inventory_plugins, section: defaults}
type: pathspec
DEFAULT_JINJA2_EXTENSIONS:
name: Enabled Jinja2 extensions
default: []
description:
- This is a developer-specific feature that allows enabling additional Jinja2 extensions.
- "See the Jinja2 documentation for details. If you do not know what these do, you probably don't need to change this setting :)"
env: [{name: ANSIBLE_JINJA2_EXTENSIONS}]
ini:
- {key: jinja2_extensions, section: defaults}
DEFAULT_JINJA2_NATIVE:
name: Use Jinja2's NativeEnvironment for templating
default: False
description: This option preserves variable types during template operations.
env: [{name: ANSIBLE_JINJA2_NATIVE}]
ini:
- {key: jinja2_native, section: defaults}
type: boolean
yaml: {key: jinja2_native}
version_added: 2.7
DEFAULT_KEEP_REMOTE_FILES:
name: Keep remote files
default: False
description:
- Enables/disables the cleaning up of the temporary files Ansible used to execute the tasks on the remote.
- If this option is enabled it will disable ``ANSIBLE_PIPELINING``.
env: [{name: ANSIBLE_KEEP_REMOTE_FILES}]
ini:
- {key: keep_remote_files, section: defaults}
type: boolean
DEFAULT_LIBVIRT_LXC_NOSECLABEL:
# TODO: move to plugin
name: No security label on Lxc
default: False
description:
- "This setting causes libvirt to connect to lxc containers by passing --noseclabel to virsh.
This is necessary when running on systems which do not have SELinux."
env:
- name: LIBVIRT_LXC_NOSECLABEL
deprecated:
why: environment variables without ``ANSIBLE_`` prefix are deprecated
version: "2.12"
alternatives: the ``ANSIBLE_LIBVIRT_LXC_NOSECLABEL`` environment variable
- name: ANSIBLE_LIBVIRT_LXC_NOSECLABEL
ini:
- {key: libvirt_lxc_noseclabel, section: selinux}
type: boolean
version_added: "2.1"
DEFAULT_LOAD_CALLBACK_PLUGINS:
name: Load callbacks for adhoc
default: False
description:
- Controls whether callback plugins are loaded when running /usr/bin/ansible.
This may be used to log activity from the command line, send notifications, and so on.
Callback plugins are always loaded for ``ansible-playbook``.
env: [{name: ANSIBLE_LOAD_CALLBACK_PLUGINS}]
ini:
- {key: bin_ansible_callbacks, section: defaults}
type: boolean
version_added: "1.8"
DEFAULT_LOCAL_TMP:
name: Controller temporary directory
default: ~/.ansible/tmp
description: Temporary directory for Ansible to use on the controller.
env: [{name: ANSIBLE_LOCAL_TEMP}]
ini:
- {key: local_tmp, section: defaults}
type: tmppath
DEFAULT_LOG_PATH:
name: Ansible log file path
default: ~
description: File to which Ansible will log on the controller. When empty logging is disabled.
env: [{name: ANSIBLE_LOG_PATH}]
ini:
- {key: log_path, section: defaults}
type: path
DEFAULT_LOG_FILTER:
name: Name filters for python logger
default: []
description: List of logger names to filter out of the log file
env: [{name: ANSIBLE_LOG_FILTER}]
ini:
- {key: log_filter, section: defaults}
type: list
DEFAULT_LOOKUP_PLUGIN_PATH:
name: Lookup Plugins Path
description: Colon separated paths in which Ansible will search for Lookup Plugins.
default: ~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup
env: [{name: ANSIBLE_LOOKUP_PLUGINS}]
ini:
- {key: lookup_plugins, section: defaults}
type: pathspec
yaml: {key: defaults.lookup_plugins}
DEFAULT_MANAGED_STR:
name: Ansible managed
default: 'Ansible managed'
description: Sets the macro for the 'ansible_managed' variable available for M(ansible.builtin.template) and M(ansible.windows.win_template) modules. This is only relevant for those two modules.
env: []
ini:
- {key: ansible_managed, section: defaults}
yaml: {key: defaults.ansible_managed}
DEFAULT_MODULE_ARGS:
name: Adhoc default arguments
default: ~
description:
- This sets the default arguments to pass to the ``ansible`` adhoc binary if no ``-a`` is specified.
env: [{name: ANSIBLE_MODULE_ARGS}]
ini:
- {key: module_args, section: defaults}
DEFAULT_MODULE_COMPRESSION:
name: Python module compression
default: ZIP_DEFLATED
description: Compression scheme to use when transferring Python modules to the target.
env: []
ini:
- {key: module_compression, section: defaults}
# vars:
# - name: ansible_module_compression
DEFAULT_MODULE_NAME:
name: Default adhoc module
default: command
description: "Module to use with the ``ansible`` AdHoc command, if none is specified via ``-m``."
env: []
ini:
- {key: module_name, section: defaults}
DEFAULT_MODULE_PATH:
name: Modules Path
description: Colon separated paths in which Ansible will search for Modules.
default: ~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules
env: [{name: ANSIBLE_LIBRARY}]
ini:
- {key: library, section: defaults}
type: pathspec
DEFAULT_MODULE_UTILS_PATH:
name: Module Utils Path
description: Colon separated paths in which Ansible will search for Module utils files, which are shared by modules.
default: ~/.ansible/plugins/module_utils:/usr/share/ansible/plugins/module_utils
env: [{name: ANSIBLE_MODULE_UTILS}]
ini:
- {key: module_utils, section: defaults}
type: pathspec
DEFAULT_NETCONF_PLUGIN_PATH:
name: Netconf Plugins Path
default: ~/.ansible/plugins/netconf:/usr/share/ansible/plugins/netconf
description: Colon separated paths in which Ansible will search for Netconf Plugins.
env: [{name: ANSIBLE_NETCONF_PLUGINS}]
ini:
- {key: netconf_plugins, section: defaults}
type: pathspec
DEFAULT_NO_LOG:
name: No log
default: False
description: "Toggle Ansible's display and logging of task details, mainly used to avoid security disclosures."
env: [{name: ANSIBLE_NO_LOG}]
ini:
- {key: no_log, section: defaults}
type: boolean
DEFAULT_NO_TARGET_SYSLOG:
name: No syslog on target
default: False
description:
    - Toggle Ansible logging to syslog on the target when it executes tasks. On Windows hosts this will disable newer
      style PowerShell modules from writing to the event log.
env: [{name: ANSIBLE_NO_TARGET_SYSLOG}]
ini:
- {key: no_target_syslog, section: defaults}
vars:
- name: ansible_no_target_syslog
version_added: '2.10'
type: boolean
yaml: {key: defaults.no_target_syslog}
DEFAULT_NULL_REPRESENTATION:
name: Represent a null
default: ~
description: What templating should return as a 'null' value. When not set it will let Jinja2 decide.
env: [{name: ANSIBLE_NULL_REPRESENTATION}]
ini:
- {key: null_representation, section: defaults}
type: none
DEFAULT_POLL_INTERVAL:
name: Async poll interval
default: 15
description:
- For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling),
this is how often to check back on the status of those tasks when an explicit poll interval is not supplied.
The default is a reasonably moderate 15 seconds which is a tradeoff between checking in frequently and
providing a quick turnaround when something may have completed.
env: [{name: ANSIBLE_POLL_INTERVAL}]
ini:
- {key: poll_interval, section: defaults}
type: integer
DEFAULT_PRIVATE_KEY_FILE:
name: Private key file
default: ~
description:
- Option for connections using a certificate or key file to authenticate, rather than an agent or passwords,
you can set the default value here to avoid re-specifying --private-key with every invocation.
env: [{name: ANSIBLE_PRIVATE_KEY_FILE}]
ini:
- {key: private_key_file, section: defaults}
type: path
DEFAULT_PRIVATE_ROLE_VARS:
name: Private role variables
default: False
description:
- Makes role variables inaccessible from other roles.
- This was introduced as a way to reset role variables to default values if
a role is used more than once in a playbook.
env: [{name: ANSIBLE_PRIVATE_ROLE_VARS}]
ini:
- {key: private_role_vars, section: defaults}
type: boolean
yaml: {key: defaults.private_role_vars}
DEFAULT_REMOTE_PORT:
name: Remote port
default: ~
description: Port to use in remote connections, when blank it will use the connection plugin default.
env: [{name: ANSIBLE_REMOTE_PORT}]
ini:
- {key: remote_port, section: defaults}
type: integer
yaml: {key: defaults.remote_port}
DEFAULT_REMOTE_USER:
name: Login/Remote User
description:
- Sets the login user for the target machines
- "When blank it uses the connection plugin's default, normally the user currently executing Ansible."
env: [{name: ANSIBLE_REMOTE_USER}]
ini:
- {key: remote_user, section: defaults}
DEFAULT_ROLES_PATH:
name: Roles path
default: ~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
description: Colon separated paths in which Ansible will search for Roles.
env: [{name: ANSIBLE_ROLES_PATH}]
expand_relative_paths: True
ini:
- {key: roles_path, section: defaults}
type: pathspec
yaml: {key: defaults.roles_path}
DEFAULT_SELINUX_SPECIAL_FS:
name: Problematic file systems
default: fuse, nfs, vboxsf, ramfs, 9p, vfat
description:
- "Some filesystems do not support safe operations and/or return inconsistent errors,
this setting makes Ansible 'tolerate' those in the list w/o causing fatal errors."
- Data corruption may occur and writes are not always verified when a filesystem is in the list.
env:
- name: ANSIBLE_SELINUX_SPECIAL_FS
version_added: "2.9"
ini:
- {key: special_context_filesystems, section: selinux}
type: list
DEFAULT_STDOUT_CALLBACK:
name: Main display callback plugin
default: default
description:
- "Set the main callback used to display Ansible output, you can only have one at a time."
- You can have many other callbacks, but just one can be in charge of stdout.
env: [{name: ANSIBLE_STDOUT_CALLBACK}]
ini:
- {key: stdout_callback, section: defaults}
ENABLE_TASK_DEBUGGER:
name: Whether to enable the task debugger
default: False
description:
    - Whether or not to enable the task debugger, this previously was done as a strategy plugin.
    - Now all strategy plugins can inherit this behavior. The debugger defaults to activating when
      a task is failed on unreachable. Use the debugger keyword for more flexibility.
type: boolean
env: [{name: ANSIBLE_ENABLE_TASK_DEBUGGER}]
ini:
- {key: enable_task_debugger, section: defaults}
version_added: "2.5"
TASK_DEBUGGER_IGNORE_ERRORS:
name: Whether a failed task with ignore_errors=True will still invoke the debugger
default: True
description:
- This option defines whether the task debugger will be invoked on a failed task when ignore_errors=True
is specified.
- True specifies that the debugger will honor ignore_errors, False will not honor ignore_errors.
type: boolean
env: [{name: ANSIBLE_TASK_DEBUGGER_IGNORE_ERRORS}]
ini:
- {key: task_debugger_ignore_errors, section: defaults}
version_added: "2.7"
DEFAULT_STRATEGY:
name: Implied strategy
default: 'linear'
description: Set the default strategy used for plays.
env: [{name: ANSIBLE_STRATEGY}]
ini:
- {key: strategy, section: defaults}
version_added: "2.3"
DEFAULT_STRATEGY_PLUGIN_PATH:
name: Strategy Plugins Path
description: Colon separated paths in which Ansible will search for Strategy Plugins.
default: ~/.ansible/plugins/strategy:/usr/share/ansible/plugins/strategy
env: [{name: ANSIBLE_STRATEGY_PLUGINS}]
ini:
- {key: strategy_plugins, section: defaults}
type: pathspec
DEFAULT_SU:
default: False
description: 'Toggle the use of "su" for tasks.'
env: [{name: ANSIBLE_SU}]
ini:
- {key: su, section: defaults}
type: boolean
yaml: {key: defaults.su}
DEFAULT_SYSLOG_FACILITY:
name: syslog facility
default: LOG_USER
description: Syslog facility to use when Ansible logs to the remote target
env: [{name: ANSIBLE_SYSLOG_FACILITY}]
ini:
- {key: syslog_facility, section: defaults}
DEFAULT_TERMINAL_PLUGIN_PATH:
name: Terminal Plugins Path
default: ~/.ansible/plugins/terminal:/usr/share/ansible/plugins/terminal
description: Colon separated paths in which Ansible will search for Terminal Plugins.
env: [{name: ANSIBLE_TERMINAL_PLUGINS}]
ini:
- {key: terminal_plugins, section: defaults}
type: pathspec
DEFAULT_TEST_PLUGIN_PATH:
name: Jinja2 Test Plugins Path
description: Colon separated paths in which Ansible will search for Jinja2 Test Plugins.
default: ~/.ansible/plugins/test:/usr/share/ansible/plugins/test
env: [{name: ANSIBLE_TEST_PLUGINS}]
ini:
- {key: test_plugins, section: defaults}
type: pathspec
DEFAULT_TIMEOUT:
name: Connection timeout
default: 10
description: This is the default timeout for connection plugins to use.
env: [{name: ANSIBLE_TIMEOUT}]
ini:
- {key: timeout, section: defaults}
type: integer
DEFAULT_TRANSPORT:
# note that ssh_utils refs this and needs to be updated if removed
name: Connection plugin
default: smart
description: "Default connection plugin to use, the 'smart' option will toggle between 'ssh' and 'paramiko' depending on controller OS and ssh versions"
env: [{name: ANSIBLE_TRANSPORT}]
ini:
- {key: transport, section: defaults}
DEFAULT_UNDEFINED_VAR_BEHAVIOR:
name: Jinja2 fail on undefined
default: True
version_added: "1.3"
description:
- When True, this causes ansible templating to fail steps that reference variable names that are likely typoed.
- "Otherwise, any '{{ template_expression }}' that contains undefined variables will be rendered in a template or ansible action line exactly as written."
env: [{name: ANSIBLE_ERROR_ON_UNDEFINED_VARS}]
ini:
- {key: error_on_undefined_vars, section: defaults}
type: boolean
DEFAULT_VARS_PLUGIN_PATH:
name: Vars Plugins Path
default: ~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars
description: Colon separated paths in which Ansible will search for Vars Plugins.
env: [{name: ANSIBLE_VARS_PLUGINS}]
ini:
- {key: vars_plugins, section: defaults}
type: pathspec
# TODO: unused?
#DEFAULT_VAR_COMPRESSION_LEVEL:
# default: 0
# description: 'TODO: write it'
# env: [{name: ANSIBLE_VAR_COMPRESSION_LEVEL}]
# ini:
# - {key: var_compression_level, section: defaults}
# type: integer
# yaml: {key: defaults.var_compression_level}
DEFAULT_VAULT_ID_MATCH:
name: Force vault id match
default: False
description: 'If true, decrypting vaults with a vault id will only try the password from the matching vault-id'
env: [{name: ANSIBLE_VAULT_ID_MATCH}]
ini:
- {key: vault_id_match, section: defaults}
yaml: {key: defaults.vault_id_match}
DEFAULT_VAULT_IDENTITY:
name: Vault id label
default: default
description: 'The label to use for the default vault id label in cases where a vault id label is not provided'
env: [{name: ANSIBLE_VAULT_IDENTITY}]
ini:
- {key: vault_identity, section: defaults}
yaml: {key: defaults.vault_identity}
DEFAULT_VAULT_ENCRYPT_IDENTITY:
name: Vault id to use for encryption
description: 'The vault_id to use for encrypting by default. If multiple vault_ids are provided, this specifies which to use for encryption. The --encrypt-vault-id cli option overrides the configured value.'
env: [{name: ANSIBLE_VAULT_ENCRYPT_IDENTITY}]
ini:
- {key: vault_encrypt_identity, section: defaults}
yaml: {key: defaults.vault_encrypt_identity}
DEFAULT_VAULT_IDENTITY_LIST:
name: Default vault ids
default: []
description: 'A list of vault-ids to use by default. Equivalent to multiple --vault-id args. Vault-ids are tried in order.'
env: [{name: ANSIBLE_VAULT_IDENTITY_LIST}]
ini:
- {key: vault_identity_list, section: defaults}
type: list
yaml: {key: defaults.vault_identity_list}
DEFAULT_VAULT_PASSWORD_FILE:
name: Vault password file
default: ~
description:
- 'The vault password file to use. Equivalent to --vault-password-file or --vault-id'
- If executable, it will be run and the resulting stdout will be used as the password.
env: [{name: ANSIBLE_VAULT_PASSWORD_FILE}]
ini:
- {key: vault_password_file, section: defaults}
type: path
yaml: {key: defaults.vault_password_file}
DEFAULT_VERBOSITY:
name: Verbosity
default: 0
description: Sets the default verbosity, equivalent to the number of ``-v`` passed in the command line.
env: [{name: ANSIBLE_VERBOSITY}]
ini:
- {key: verbosity, section: defaults}
type: integer
DEPRECATION_WARNINGS:
name: Deprecation messages
default: True
description: "Toggle to control the showing of deprecation warnings"
env: [{name: ANSIBLE_DEPRECATION_WARNINGS}]
ini:
- {key: deprecation_warnings, section: defaults}
type: boolean
DEVEL_WARNING:
name: Running devel warning
default: True
description: Toggle to control showing warnings related to running devel
env: [{name: ANSIBLE_DEVEL_WARNING}]
ini:
- {key: devel_warning, section: defaults}
type: boolean
DIFF_ALWAYS:
name: Show differences
default: False
description: Configuration toggle to tell modules to show differences when in 'changed' status, equivalent to ``--diff``.
env: [{name: ANSIBLE_DIFF_ALWAYS}]
ini:
- {key: always, section: diff}
type: bool
DIFF_CONTEXT:
name: Difference context
default: 3
description: How many lines of context to show when displaying the differences between files.
env: [{name: ANSIBLE_DIFF_CONTEXT}]
ini:
- {key: context, section: diff}
type: integer
DISPLAY_ARGS_TO_STDOUT:
name: Show task arguments
default: False
description:
- "Normally ``ansible-playbook`` will print a header for each task that is run.
These headers will contain the name: field from the task if you specified one.
If you didn't then ``ansible-playbook`` uses the task's action to help you tell which task is presently running.
Sometimes you run many of the same action and so you want more information about the task to differentiate it from others of the same action.
If you set this variable to True in the config then ``ansible-playbook`` will also include the task's arguments in the header."
- "This setting defaults to False because there is a chance that you have sensitive values in your parameters and
you do not want those to be printed."
- "If you set this to True you should be sure that you have secured your environment's stdout
(no one can shoulder surf your screen and you aren't saving stdout to an insecure file) or
      made sure that all of your playbooks explicitly added the ``no_log: True`` parameter to tasks which have sensitive values.
      See How do I keep secret data in my playbook? for more information."
env: [{name: ANSIBLE_DISPLAY_ARGS_TO_STDOUT}]
ini:
- {key: display_args_to_stdout, section: defaults}
type: boolean
version_added: "2.1"
DISPLAY_SKIPPED_HOSTS:
name: Show skipped results
default: True
description: "Toggle to control displaying skipped task/host entries in a task in the default callback"
env:
- name: DISPLAY_SKIPPED_HOSTS
deprecated:
why: environment variables without ``ANSIBLE_`` prefix are deprecated
version: "2.12"
alternatives: the ``ANSIBLE_DISPLAY_SKIPPED_HOSTS`` environment variable
- name: ANSIBLE_DISPLAY_SKIPPED_HOSTS
ini:
- {key: display_skipped_hosts, section: defaults}
type: boolean
DOCSITE_ROOT_URL:
name: Root docsite URL
default: https://docs.ansible.com/ansible-core/
description: Root docsite URL used to generate docs URLs in warning/error text;
must be an absolute URL with valid scheme and trailing slash.
ini:
- {key: docsite_root_url, section: defaults}
version_added: "2.8"
DUPLICATE_YAML_DICT_KEY:
name: Controls ansible behaviour when finding duplicate keys in YAML.
default: warn
description:
- By default Ansible will issue a warning when a duplicate dict key is encountered in YAML.
    - These warnings can be silenced by adjusting this setting to ``ignore``.
env: [{name: ANSIBLE_DUPLICATE_YAML_DICT_KEY}]
ini:
- {key: duplicate_dict_key, section: defaults}
type: string
choices: ['warn', 'error', 'ignore']
version_added: "2.9"
ERROR_ON_MISSING_HANDLER:
name: Missing handler error
default: True
description: "Toggle to allow missing handlers to become a warning instead of an error when notifying."
env: [{name: ANSIBLE_ERROR_ON_MISSING_HANDLER}]
ini:
- {key: error_on_missing_handler, section: defaults}
type: boolean
CONNECTION_FACTS_MODULES:
name: Map of connections to fact modules
default:
# use ansible.legacy names on unqualified facts modules to allow library/ overrides
asa: ansible.legacy.asa_facts
cisco.asa.asa: cisco.asa.asa_facts
eos: ansible.legacy.eos_facts
arista.eos.eos: arista.eos.eos_facts
frr: ansible.legacy.frr_facts
frr.frr.frr: frr.frr.frr_facts
ios: ansible.legacy.ios_facts
cisco.ios.ios: cisco.ios.ios_facts
iosxr: ansible.legacy.iosxr_facts
cisco.iosxr.iosxr: cisco.iosxr.iosxr_facts
junos: ansible.legacy.junos_facts
junipernetworks.junos.junos: junipernetworks.junos.junos_facts
nxos: ansible.legacy.nxos_facts
cisco.nxos.nxos: cisco.nxos.nxos_facts
vyos: ansible.legacy.vyos_facts
vyos.vyos.vyos: vyos.vyos.vyos_facts
exos: ansible.legacy.exos_facts
extreme.exos.exos: extreme.exos.exos_facts
slxos: ansible.legacy.slxos_facts
extreme.slxos.slxos: extreme.slxos.slxos_facts
voss: ansible.legacy.voss_facts
extreme.voss.voss: extreme.voss.voss_facts
ironware: ansible.legacy.ironware_facts
community.network.ironware: community.network.ironware_facts
description: "Which modules to run during a play's fact gathering stage based on connection"
type: dict
FACTS_MODULES:
name: Gather Facts Modules
default:
- smart
description: "Which modules to run during a play's fact gathering stage, using the default of 'smart' will try to figure it out based on connection type."
env: [{name: ANSIBLE_FACTS_MODULES}]
ini:
- {key: facts_modules, section: defaults}
type: list
vars:
- name: ansible_facts_modules
GALAXY_IGNORE_CERTS:
name: Galaxy validate certs
default: False
description:
- If set to yes, ansible-galaxy will not validate TLS certificates.
This can be useful for testing against a server with a self-signed certificate.
env: [{name: ANSIBLE_GALAXY_IGNORE}]
ini:
- {key: ignore_certs, section: galaxy}
type: boolean
GALAXY_ROLE_SKELETON:
name: Galaxy role skeleton directory
description: Role skeleton directory to use as a template for the ``init`` action in ``ansible-galaxy``/``ansible-galaxy role``, same as ``--role-skeleton``.
env: [{name: ANSIBLE_GALAXY_ROLE_SKELETON}]
ini:
- {key: role_skeleton, section: galaxy}
type: path
GALAXY_ROLE_SKELETON_IGNORE:
name: Galaxy role skeleton ignore
default: ["^.git$", "^.*/.git_keep$"]
description: patterns of files to ignore inside a Galaxy role or collection skeleton directory
env: [{name: ANSIBLE_GALAXY_ROLE_SKELETON_IGNORE}]
ini:
- {key: role_skeleton_ignore, section: galaxy}
type: list
GALAXY_COLLECTION_SKELETON:
name: Galaxy collection skeleton directory
description: Collection skeleton directory to use as a template for the ``init`` action in ``ansible-galaxy collection``, same as ``--collection-skeleton``.
env: [{name: ANSIBLE_GALAXY_COLLECTION_SKELETON}]
ini:
- {key: collection_skeleton, section: galaxy}
type: path
GALAXY_COLLECTION_SKELETON_IGNORE:
name: Galaxy collection skeleton ignore
default: ["^.git$", "^.*/.git_keep$"]
description: patterns of files to ignore inside a Galaxy collection skeleton directory
env: [{name: ANSIBLE_GALAXY_COLLECTION_SKELETON_IGNORE}]
ini:
- {key: collection_skeleton_ignore, section: galaxy}
type: list
# TODO: unused?
#GALAXY_SCMS:
# name: Galaxy SCMS
# default: git, hg
# description: Available galaxy source control management systems.
# env: [{name: ANSIBLE_GALAXY_SCMS}]
# ini:
# - {key: scms, section: galaxy}
# type: list
GALAXY_SERVER:
default: https://galaxy.ansible.com
description: "URL to prepend when roles don't specify the full URI, assume they are referencing this server as the source."
env: [{name: ANSIBLE_GALAXY_SERVER}]
ini:
- {key: server, section: galaxy}
yaml: {key: galaxy.server}
GALAXY_SERVER_LIST:
description:
- A list of Galaxy servers to use when installing a collection.
- The value corresponds to the config ini header ``[galaxy_server.{{item}}]`` which defines the server details.
- 'See :ref:`galaxy_server_config` for more details on how to define a Galaxy server.'
    - The order of servers in this list is used as the order in which a collection is resolved.
- Setting this config option will ignore the :ref:`galaxy_server` config option.
env: [{name: ANSIBLE_GALAXY_SERVER_LIST}]
ini:
- {key: server_list, section: galaxy}
type: list
version_added: "2.9"
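# For example (server name and URL are illustrative):
#   [galaxy]
#   server_list = my_hub
#   [galaxy_server.my_hub]
#   url = https://galaxy.example.com/api/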
GALAXY_TOKEN_PATH:
default: ~/.ansible/galaxy_token
description: "Local path to galaxy access token file"
env: [{name: ANSIBLE_GALAXY_TOKEN_PATH}]
ini:
- {key: token_path, section: galaxy}
type: path
version_added: "2.9"
GALAXY_DISPLAY_PROGRESS:
default: ~
description:
- Some steps in ``ansible-galaxy`` display a progress wheel which can cause issues on certain displays or when
      outputting the stdout to a file.
- This config option controls whether the display wheel is shown or not.
- The default is to show the display wheel if stdout has a tty.
env: [{name: ANSIBLE_GALAXY_DISPLAY_PROGRESS}]
ini:
- {key: display_progress, section: galaxy}
type: bool
version_added: "2.10"
GALAXY_CACHE_DIR:
default: ~/.ansible/galaxy_cache
description:
- The directory that stores cached responses from a Galaxy server.
- This is only used by the ``ansible-galaxy collection install`` and ``download`` commands.
- Cache files inside this dir will be ignored if they are world writable.
env:
- name: ANSIBLE_GALAXY_CACHE_DIR
ini:
- section: galaxy
key: cache_dir
type: path
version_added: '2.11'
HOST_KEY_CHECKING:
# note: constant not in use by ssh plugin anymore
# TODO: check non ssh connection plugins for use/migration
name: Check host keys
default: True
description: 'Set this to "False" if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host'
env: [{name: ANSIBLE_HOST_KEY_CHECKING}]
ini:
- {key: host_key_checking, section: defaults}
type: boolean
HOST_PATTERN_MISMATCH:
name: Control host pattern mismatch behaviour
default: 'warning'
description: This setting changes the behaviour of mismatched host patterns, it allows you to force a fatal error, a warning or just ignore it
env: [{name: ANSIBLE_HOST_PATTERN_MISMATCH}]
ini:
- {key: host_pattern_mismatch, section: inventory}
choices: ['warning', 'error', 'ignore']
version_added: "2.8"
INTERPRETER_PYTHON:
name: Python interpreter path (or automatic discovery behavior) used for module execution
default: auto
env: [{name: ANSIBLE_PYTHON_INTERPRETER}]
ini:
- {key: interpreter_python, section: defaults}
vars:
- {name: ansible_python_interpreter}
version_added: "2.8"
description:
- Path to the Python interpreter to be used for module execution on remote targets, or an automatic discovery mode.
Supported discovery modes are ``auto`` (the default), ``auto_silent``, ``auto_legacy``, and ``auto_legacy_silent``.
All discovery modes employ a lookup table to use the included system Python (on distributions known to include one),
falling back to a fixed ordered list of well-known Python interpreter locations if a platform-specific default is not
available. The fallback behavior will issue a warning that the interpreter should be set explicitly (since interpreters
installed later may change which one is used). This warning behavior can be disabled by setting ``auto_silent`` or
``auto_legacy_silent``. The value of ``auto_legacy`` provides all the same behavior, but for backwards-compatibility
with older Ansible releases that always defaulted to ``/usr/bin/python``, will use that interpreter if present.
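# Illustrative usage of INTERPRETER_PYTHON (values are examples): pin the interpreter per host in inventory,
#   myhost ansible_python_interpreter=/usr/bin/python3
# or globally in ansible.cfg:
#   [defaults]
#   interpreter_python = /usr/bin/python3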
INTERPRETER_PYTHON_DISTRO_MAP:
name: Mapping of known included platform pythons for various Linux distros
default:
centos: &rhelish
'6': /usr/bin/python
'8': /usr/libexec/platform-python
'9': /usr/bin/python3
debian:
'8': /usr/bin/python
'10': /usr/bin/python3
fedora:
'23': /usr/bin/python3
oracle: *rhelish
redhat: *rhelish
rhel: *rhelish
rocky: *rhelish
ubuntu:
'14': /usr/bin/python
'16': /usr/bin/python3
version_added: "2.8"
# FUTURE: add inventory override once we're sure it can't be abused by a rogue target
# FUTURE: add a platform layer to the map so we could use for, eg, freebsd/macos/etc?
INTERPRETER_PYTHON_FALLBACK:
name: Ordered list of Python interpreters to check for in discovery
default:
- python3.10
- python3.9
- python3.8
- python3.7
- python3.6
- python3.5
- /usr/bin/python3
- /usr/libexec/platform-python
- python2.7
- python2.6
- /usr/bin/python
- python
vars:
- name: ansible_interpreter_python_fallback
type: list
version_added: "2.8"
TRANSFORM_INVALID_GROUP_CHARS:
name: Transform invalid characters in group names
default: 'never'
description:
- Make ansible transform invalid characters in group names supplied by inventory sources.
- When 'never', it will allow the group name but warn about the issue.
- When 'ignore', it does the same as 'never', without issuing a warning.
- When 'always', it will replace any invalid characters with '_' (underscore) and warn the user.
- When 'silently', it does the same as 'always', without issuing a warning.
env: [{name: ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS}]
ini:
- {key: force_valid_group_names, section: defaults}
type: string
choices: ['always', 'never', 'ignore', 'silently']
version_added: '2.8'
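# Illustrative: with 'always', an inventory group named 'web-servers' is transformed to 'web_servers'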
INVALID_TASK_ATTRIBUTE_FAILED:
name: Controls whether invalid attributes for a task result in errors instead of warnings
default: True
description: If 'false', invalid attributes for a task will result in warnings instead of errors
type: boolean
env:
- name: ANSIBLE_INVALID_TASK_ATTRIBUTE_FAILED
ini:
- key: invalid_task_attribute_failed
section: defaults
version_added: "2.7"
INVENTORY_ANY_UNPARSED_IS_FAILED:
name: Controls whether any unparseable inventory source is a fatal error
default: False
description: >
If 'true', it is a fatal error when any given inventory source
cannot be successfully parsed by any available inventory plugin;
otherwise, this situation only attracts a warning.
type: boolean
env: [{name: ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED}]
ini:
- {key: any_unparsed_is_failed, section: inventory}
version_added: "2.7"
INVENTORY_CACHE_ENABLED:
name: Inventory caching enabled
default: False
description:
- Toggle to turn on inventory caching.
- This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
- The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory configuration.
- This message will be removed in 2.16.
env: [{name: ANSIBLE_INVENTORY_CACHE}]
ini:
- {key: cache, section: inventory}
type: bool
INVENTORY_CACHE_PLUGIN:
name: Inventory cache plugin
description:
- The plugin for caching inventory.
- This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
- The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
- This message will be removed in 2.16.
env: [{name: ANSIBLE_INVENTORY_CACHE_PLUGIN}]
ini:
- {key: cache_plugin, section: inventory}
INVENTORY_CACHE_PLUGIN_CONNECTION:
name: Inventory cache plugin URI to override the defaults section
description:
- The inventory cache connection.
- This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
- The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
- This message will be removed in 2.16.
env: [{name: ANSIBLE_INVENTORY_CACHE_CONNECTION}]
ini:
- {key: cache_connection, section: inventory}
INVENTORY_CACHE_PLUGIN_PREFIX:
name: Inventory cache plugin table prefix
description:
- The table prefix for the cache plugin.
- This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
- The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
- This message will be removed in 2.16.
env: [{name: ANSIBLE_INVENTORY_CACHE_PLUGIN_PREFIX}]
default: ansible_inventory_
ini:
- {key: cache_prefix, section: inventory}
INVENTORY_CACHE_TIMEOUT:
name: Inventory cache plugin expiration timeout
description:
- Expiration timeout for the inventory cache plugin data.
- This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
- The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
- This message will be removed in 2.16.
default: 3600
env: [{name: ANSIBLE_INVENTORY_CACHE_TIMEOUT}]
ini:
- {key: cache_timeout, section: inventory}
INVENTORY_ENABLED:
name: Active Inventory plugins
default: ['host_list', 'script', 'auto', 'yaml', 'ini', 'toml']
description: List of enabled inventory plugins, it also determines the order in which they are used.
env: [{name: ANSIBLE_INVENTORY_ENABLED}]
ini:
- {key: enable_plugins, section: inventory}
type: list
INVENTORY_EXPORT:
name: Set ansible-inventory into export mode
default: False
description: Controls if ansible-inventory will accurately reflect Ansible's view into inventory or if it is optimized for exporting.
env: [{name: ANSIBLE_INVENTORY_EXPORT}]
ini:
- {key: export, section: inventory}
type: bool
INVENTORY_IGNORE_EXTS:
name: Inventory ignore extensions
default: "{{(REJECT_EXTS + ('.orig', '.ini', '.cfg', '.retry'))}}"
description: List of extensions to ignore when using a directory as an inventory source
env: [{name: ANSIBLE_INVENTORY_IGNORE}]
ini:
- {key: inventory_ignore_extensions, section: defaults}
- {key: ignore_extensions, section: inventory}
type: list
INVENTORY_IGNORE_PATTERNS:
name: Inventory ignore patterns
default: []
description: List of patterns to ignore when using a directory as an inventory source
env: [{name: ANSIBLE_INVENTORY_IGNORE_REGEX}]
ini:
- {key: inventory_ignore_patterns, section: defaults}
- {key: ignore_patterns, section: inventory}
type: list
INVENTORY_UNPARSED_IS_FAILED:
name: Unparsed Inventory failure
default: False
description: >
If 'true' it is a fatal error if every single potential inventory
source fails to parse; otherwise, this situation will only attract a
warning.
env: [{name: ANSIBLE_INVENTORY_UNPARSED_FAILED}]
ini:
- {key: unparsed_is_failed, section: inventory}
type: bool
JINJA2_NATIVE_WARNING:
name: Running older than required Jinja version for jinja2_native warning
default: True
description: Toggle to control showing warnings related to running a Jinja version
older than required for jinja2_native
env:
- name: ANSIBLE_JINJA2_NATIVE_WARNING
deprecated:
why: This option is no longer used in the Ansible Core code base.
version: "2.17"
ini:
- {key: jinja2_native_warning, section: defaults}
type: boolean
MAX_FILE_SIZE_FOR_DIFF:
name: Diff maximum file size
default: 104448
description: Maximum size of files to be considered for diff display
env: [{name: ANSIBLE_MAX_DIFF_SIZE}]
ini:
- {key: max_diff_size, section: defaults}
type: int
NETWORK_GROUP_MODULES:
name: Network module families
default: [eos, nxos, ios, iosxr, junos, enos, ce, vyos, sros, dellos9, dellos10, dellos6, asa, aruba, aireos, bigip, ironware, onyx, netconf, exos, voss, slxos]
description: 'TODO: write it'
env:
- name: NETWORK_GROUP_MODULES
deprecated:
why: environment variables without ``ANSIBLE_`` prefix are deprecated
version: "2.12"
alternatives: the ``ANSIBLE_NETWORK_GROUP_MODULES`` environment variable
- name: ANSIBLE_NETWORK_GROUP_MODULES
ini:
- {key: network_group_modules, section: defaults}
type: list
yaml: {key: defaults.network_group_modules}
INJECT_FACTS_AS_VARS:
default: True
description:
- Facts are available inside the `ansible_facts` variable, this setting also pushes them as their own vars in the main namespace.
- Unlike inside the `ansible_facts` dictionary, these will have an `ansible_` prefix.
env: [{name: ANSIBLE_INJECT_FACT_VARS}]
ini:
- {key: inject_facts_as_vars, section: defaults}
type: boolean
version_added: "2.5"
MODULE_IGNORE_EXTS:
name: Module ignore extensions
default: "{{(REJECT_EXTS + ('.yaml', '.yml', '.ini'))}}"
description:
- List of extensions to ignore when looking for modules to load
- This is for rejecting script and binary module fallback extensions
env: [{name: ANSIBLE_MODULE_IGNORE_EXTS}]
ini:
- {key: module_ignore_exts, section: defaults}
type: list
OLD_PLUGIN_CACHE_CLEARING:
description: Previously Ansible would only clear some of the plugin loading caches when loading new roles; this led to some behaviours in which a plugin loaded in previous plays would be unexpectedly 'sticky'. This setting allows you to return to that behaviour.
env: [{name: ANSIBLE_OLD_PLUGIN_CACHE_CLEAR}]
ini:
- {key: old_plugin_cache_clear, section: defaults}
type: boolean
default: False
version_added: "2.8"
PARAMIKO_HOST_KEY_AUTO_ADD:
# TODO: move to plugin
default: False
description: 'TODO: write it'
env: [{name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD}]
ini:
- {key: host_key_auto_add, section: paramiko_connection}
type: boolean
PARAMIKO_LOOK_FOR_KEYS:
name: look for keys
default: True
description: 'TODO: write it'
env: [{name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS}]
ini:
- {key: look_for_keys, section: paramiko_connection}
type: boolean
PERSISTENT_CONTROL_PATH_DIR:
name: Persistence socket path
default: ~/.ansible/pc
description: Path to socket to be used by the connection persistence system.
env: [{name: ANSIBLE_PERSISTENT_CONTROL_PATH_DIR}]
ini:
- {key: control_path_dir, section: persistent_connection}
type: path
PERSISTENT_CONNECT_TIMEOUT:
name: Persistence timeout
default: 30
description: This controls how long the persistent connection will remain idle before it is destroyed.
env: [{name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT}]
ini:
- {key: connect_timeout, section: persistent_connection}
type: integer
PERSISTENT_CONNECT_RETRY_TIMEOUT:
name: Persistence connection retry timeout
default: 15
description: This controls the retry timeout for persistent connection to connect to the local domain socket.
env: [{name: ANSIBLE_PERSISTENT_CONNECT_RETRY_TIMEOUT}]
ini:
- {key: connect_retry_timeout, section: persistent_connection}
type: integer
PERSISTENT_COMMAND_TIMEOUT:
name: Persistence command timeout
default: 30
description: This controls the amount of time to wait for a response from the remote device before timing out the persistent connection.
env: [{name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT}]
ini:
- {key: command_timeout, section: persistent_connection}
type: int
PLAYBOOK_DIR:
name: playbook dir override for non-playbook CLIs (ala --playbook-dir)
version_added: "2.9"
description:
- A number of non-playbook CLIs have a ``--playbook-dir`` argument; this sets the default value for it.
env: [{name: ANSIBLE_PLAYBOOK_DIR}]
ini: [{key: playbook_dir, section: defaults}]
type: path
PLAYBOOK_VARS_ROOT:
name: playbook vars files root
default: top
version_added: "2.4.1"
description:
- This sets which playbook dirs will be used as a root to process vars plugins, which includes finding host_vars/group_vars
- The ``top`` option follows the traditional behaviour of using the top playbook in the chain to find the root directory.
- The ``bottom`` option follows the 2.4.0 behaviour of using the current playbook to find the root directory.
- The ``all`` option examines from the first parent to the current playbook.
env: [{name: ANSIBLE_PLAYBOOK_VARS_ROOT}]
ini:
- {key: playbook_vars_root, section: defaults}
choices: [ top, bottom, all ]
PLUGIN_FILTERS_CFG:
name: Config file for limiting valid plugins
default: null
version_added: "2.5.0"
description:
- "A path to configuration for filtering which plugins installed on the system are allowed to be used."
- "See :ref:`plugin_filtering_config` for details of the filter file's format."
- " The default is /etc/ansible/plugin_filters.yml"
ini:
- key: plugin_filters_cfg
section: default
deprecated:
why: specifying "plugin_filters_cfg" under the "default" section is deprecated
version: "2.12"
alternatives: the "defaults" section instead
- key: plugin_filters_cfg
section: defaults
type: path
PYTHON_MODULE_RLIMIT_NOFILE:
name: Adjust maximum file descriptor soft limit during Python module execution
description:
- Attempts to set RLIMIT_NOFILE soft limit to the specified value when executing Python modules (can speed up subprocess usage on
Python 2.x. See https://bugs.python.org/issue11284). The value will be limited by the existing hard limit. Default
value of 0 does not attempt to adjust existing system-defined limits.
default: 0
env:
- {name: ANSIBLE_PYTHON_MODULE_RLIMIT_NOFILE}
ini:
- {key: python_module_rlimit_nofile, section: defaults}
vars:
- {name: ansible_python_module_rlimit_nofile}
version_added: '2.8'
RETRY_FILES_ENABLED:
name: Retry files
default: False
description: This controls whether a failed Ansible playbook should create a .retry file.
env: [{name: ANSIBLE_RETRY_FILES_ENABLED}]
ini:
- {key: retry_files_enabled, section: defaults}
type: bool
RETRY_FILES_SAVE_PATH:
name: Retry files path
default: ~
description:
- This sets the path in which Ansible will save .retry files when a playbook fails and retry files are enabled.
- This file will be overwritten after each run with the list of failed hosts from all plays.
env: [{name: ANSIBLE_RETRY_FILES_SAVE_PATH}]
ini:
- {key: retry_files_save_path, section: defaults}
type: path
RUN_VARS_PLUGINS:
name: When should vars plugins run relative to inventory
default: demand
description:
- This setting can be used to optimize vars_plugin usage depending on the user's inventory size and play selection.
- Setting to C(demand) will run vars_plugins relative to inventory sources anytime vars are 'demanded' by tasks.
- Setting to C(start) will run vars_plugins relative to inventory sources after importing that inventory source.
env: [{name: ANSIBLE_RUN_VARS_PLUGINS}]
ini:
- {key: run_vars_plugins, section: defaults}
type: str
choices: ['demand', 'start']
version_added: "2.10"
SHOW_CUSTOM_STATS:
name: Display custom stats
default: False
description: 'This adds the custom stats set via the set_stats plugin to the default output'
env: [{name: ANSIBLE_SHOW_CUSTOM_STATS}]
ini:
- {key: show_custom_stats, section: defaults}
type: bool
STRING_TYPE_FILTERS:
name: Filters to preserve strings
default: [string, to_json, to_nice_json, to_yaml, to_nice_yaml, ppretty, json]
description:
- "This list of filters avoids 'type conversion' when templating variables"
- Useful when you want to avoid conversion into lists or dictionaries for JSON strings, for example.
env: [{name: ANSIBLE_STRING_TYPE_FILTERS}]
ini:
- {key: dont_type_filters, section: jinja2}
type: list
SYSTEM_WARNINGS:
name: System warnings
default: True
description:
- Allows disabling of warnings related to potential issues on the system running ansible itself (not on the managed hosts)
- These may include warnings about 3rd party packages or other conditions that should be resolved if possible.
env: [{name: ANSIBLE_SYSTEM_WARNINGS}]
ini:
- {key: system_warnings, section: defaults}
type: boolean
TAGS_RUN:
name: Run Tags
default: []
type: list
description: default list of tags to run in your plays; Skip Tags has precedence.
env: [{name: ANSIBLE_RUN_TAGS}]
ini:
- {key: run, section: tags}
version_added: "2.5"
TAGS_SKIP:
name: Skip Tags
default: []
type: list
description: default list of tags to skip in your plays; has precedence over Run Tags
env: [{name: ANSIBLE_SKIP_TAGS}]
ini:
- {key: skip, section: tags}
version_added: "2.5"
TASK_TIMEOUT:
name: Task Timeout
default: 0
description:
- Set the maximum time (in seconds) that a task can run for.
- If set to 0 (the default) there is no timeout.
env: [{name: ANSIBLE_TASK_TIMEOUT}]
ini:
- {key: task_timeout, section: defaults}
type: integer
version_added: '2.10'
WORKER_SHUTDOWN_POLL_COUNT:
name: Worker Shutdown Poll Count
default: 0
description:
- The maximum number of times to check Task Queue Manager worker processes to verify they have exited cleanly.
- After this limit is reached any worker processes still running will be terminated.
- This is for internal use only.
env: [{name: ANSIBLE_WORKER_SHUTDOWN_POLL_COUNT}]
type: integer
version_added: '2.10'
WORKER_SHUTDOWN_POLL_DELAY:
name: Worker Shutdown Poll Delay
default: 0.1
description:
- The number of seconds to sleep between polling loops when checking Task Queue Manager worker processes to verify they have exited cleanly.
- This is for internal use only.
env: [{name: ANSIBLE_WORKER_SHUTDOWN_POLL_DELAY}]
type: float
version_added: '2.10'
USE_PERSISTENT_CONNECTIONS:
name: Persistence
default: False
description: Toggles the use of persistence for connections.
env: [{name: ANSIBLE_USE_PERSISTENT_CONNECTIONS}]
ini:
- {key: use_persistent_connections, section: defaults}
type: boolean
VARIABLE_PLUGINS_ENABLED:
name: Vars plugin enabled list
default: ['host_group_vars']
description: Whitelist for variable plugins that require it.
env: [{name: ANSIBLE_VARS_ENABLED}]
ini:
- {key: vars_plugins_enabled, section: defaults}
type: list
version_added: "2.10"
VARIABLE_PRECEDENCE:
name: Group variable precedence
default: ['all_inventory', 'groups_inventory', 'all_plugins_inventory', 'all_plugins_play', 'groups_plugins_inventory', 'groups_plugins_play']
description: Allows changing the group variable precedence merge order.
env: [{name: ANSIBLE_PRECEDENCE}]
ini:
- {key: precedence, section: defaults}
type: list
version_added: "2.4"
WIN_ASYNC_STARTUP_TIMEOUT:
name: Windows Async Startup Timeout
default: 5
description:
- For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling),
this is how long, in seconds, to wait for the task spawned by Ansible to connect back to the named pipe used
on Windows systems. The default is 5 seconds. This can be too low on slower systems, or systems under heavy load.
- This is not the total time an async command can run for, but is a separate timeout to wait for an async command to
start. The task will only start to be timed against its async_timeout once it has connected to the pipe, so the
overall maximum duration the task can take will be extended by the amount specified here.
env: [{name: ANSIBLE_WIN_ASYNC_STARTUP_TIMEOUT}]
ini:
- {key: win_async_startup_timeout, section: defaults}
type: integer
vars:
- {name: ansible_win_async_startup_timeout}
version_added: '2.10'
YAML_FILENAME_EXTENSIONS:
name: Valid YAML extensions
default: [".yml", ".yaml", ".json"]
description:
- "Check all of these extensions when looking for 'variable' files which should be YAML or JSON or vaulted versions of these."
- 'This affects vars_files, include_vars, inventory and vars plugins among others.'
env:
- name: ANSIBLE_YAML_FILENAME_EXT
ini:
- section: defaults
key: yaml_valid_extensions
type: list
NETCONF_SSH_CONFIG:
description: This variable is used to enable bastion/jump host with netconf connection. If set to True, the bastion/jump
host ssh settings should be present in the ~/.ssh/config file; alternatively, it can be set
to a custom ssh configuration file path to read the bastion/jump host settings.
env: [{name: ANSIBLE_NETCONF_SSH_CONFIG}]
ini:
- {key: ssh_config, section: netconf_connection}
yaml: {key: netconf_connection.ssh_config}
default: null
STRING_CONVERSION_ACTION:
version_added: '2.8'
description:
- Action to take when a module parameter value is converted to a string (this does not affect variables).
For string parameters, values such as '1.00', "['a', 'b',]", and 'yes', 'y', etc.
will be converted by the YAML parser unless fully quoted.
- Valid options are 'error', 'warn', and 'ignore'.
- Since 2.8, this option defaults to 'warn' but will change to 'error' in 2.12.
default: 'warn'
env:
- name: ANSIBLE_STRING_CONVERSION_ACTION
ini:
- section: defaults
key: string_conversion_action
type: string
VALIDATE_ACTION_GROUP_METADATA:
version_added: '2.12'
description:
- A toggle to disable validating a collection's 'metadata' entry for a module_defaults action group.
Metadata containing unexpected fields or value types will produce a warning when this is True.
default: True
env: [{name: ANSIBLE_VALIDATE_ACTION_GROUP_METADATA}]
ini:
- section: defaults
key: validate_action_group_metadata
type: bool
VERBOSE_TO_STDERR:
version_added: '2.8'
description:
- Force 'verbose' option to use stderr instead of stdout
default: False
env:
- name: ANSIBLE_VERBOSE_TO_STDERR
ini:
- section: defaults
key: verbose_to_stderr
type: bool
...
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,560 |
`INTERPRETER_PYTHON_DISTRO_MAP` YAML anchors maintainability
|
### Summary
After looking over some recent PRs to add additional aliases to `rhelish`, I'm beginning to believe that using YAML anchors isn't the right thing for long-term maintainability.
I think we should add a step in the Python code that uses the distro family map from `module_utils.facts` to select the "rhelish" option from `INTERPRETER_PYTHON_DISTRO_MAP` unless a specific distro entry exists.
This will help us maintain 1 list, and limit the number of changes we need to make to the config to handle new distros.
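A rough sketch of what that could look like (illustrative only; it assumes the `OS_FAMILY_MAP` table from `lib/ansible/module_utils/facts/system/distribution.py`, and the helper name is invented):
```python
from ansible.module_utils.facts.system.distribution import OS_FAMILY_MAP

# Flatten the family table once: {'centos': 'redhat', 'rocky': 'redhat', ...}
OS_FAMILY = {member.lower(): family.lower()
             for family, members in OS_FAMILY_MAP.items()
             for member in members}

def _lookup_version_map(platform_python_map, distro):
    """Prefer an exact distro entry, then fall back to the distro's family."""
    version_map = platform_python_map.get(distro)
    if version_map is None:
        version_map = platform_python_map.get(OS_FAMILY.get(distro))
    return version_map
```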
### Issue Type
Feature Idea
### Component Name
lib/ansible/executor/interpreter_discovery.py
### Additional Information
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75560
|
https://github.com/ansible/ansible/pull/76815
|
cfe4bdc1b9a91f043fa23f78478e0c20f05175ae
|
d7d1bd6269fa463a8ac030047c285fc3677ad8a0
| 2021-08-24T15:13:05Z |
python
| 2022-01-21T17:46:45Z |
lib/ansible/executor/interpreter_discovery.py
|
# Copyright: (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import bisect
import json
import pkgutil
import re
from ansible import constants as C
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.distro import LinuxDistribution
from ansible.utils.display import Display
from ansible.utils.plugin_docs import get_versioned_doclink
from ansible.module_utils.compat.version import LooseVersion
from traceback import format_exc
display = Display()
foundre = re.compile(r'(?s)PLATFORM[\r\n]+(.*)FOUND(.*)ENDFOUND')
class InterpreterDiscoveryRequiredError(Exception):
def __init__(self, message, interpreter_name, discovery_mode):
super(InterpreterDiscoveryRequiredError, self).__init__(message)
self.interpreter_name = interpreter_name
self.discovery_mode = discovery_mode
def __str__(self):
return self.message
def __repr__(self):
# TODO: proper repr impl
return self.message
def discover_interpreter(action, interpreter_name, discovery_mode, task_vars):
# interpreter discovery is a 2-step process with the target. First, we use a simple shell-agnostic bootstrap to
# get the system type from uname, and find any random Python that can get us the info we need. For supported
# target OS types, we'll dispatch a Python script that calls platform.dist() (for older platforms, where available)
# and brings back /etc/os-release (if present). The proper Python path is looked up in a table of known
# distros/versions with included Pythons; if nothing is found, depending on the discovery mode, either the
# default fallback of /usr/bin/python is used (if we know it's there), or discovery fails.
# FUTURE: add logical equivalence for "python3" in the case of py3-only modules?
if interpreter_name != 'python':
raise ValueError('Interpreter discovery not supported for {0}'.format(interpreter_name))
host = task_vars.get('inventory_hostname', 'unknown')
res = None
platform_type = 'unknown'
found_interpreters = [u'/usr/bin/python'] # fallback value
is_auto_legacy = discovery_mode.startswith('auto_legacy')
is_silent = discovery_mode.endswith('_silent')
try:
platform_python_map = C.config.get_config_value('INTERPRETER_PYTHON_DISTRO_MAP', variables=task_vars)
bootstrap_python_list = C.config.get_config_value('INTERPRETER_PYTHON_FALLBACK', variables=task_vars)
display.vvv(msg=u"Attempting {0} interpreter discovery".format(interpreter_name), host=host)
# not all command -v impls accept a list of commands, so we have to call it once per python
command_list = ["command -v '%s'" % py for py in bootstrap_python_list]
shell_bootstrap = "echo PLATFORM; uname; echo FOUND; {0}; echo ENDFOUND".format('; '.join(command_list))
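# on a typical Linux target the bootstrap output then looks like (illustrative):
#   PLATFORM
#   Linux
#   FOUND
#   /usr/bin/python3.10
#   /usr/bin/python3
#   ENDFOUND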
# FUTURE: in most cases we probably don't want to use become, but maybe sometimes we do?
res = action._low_level_execute_command(shell_bootstrap, sudoable=False)
raw_stdout = res.get('stdout', u'')
match = foundre.match(raw_stdout)
if not match:
display.debug(u'raw interpreter discovery output: {0}'.format(raw_stdout), host=host)
raise ValueError('unexpected output from Python interpreter discovery')
platform_type = match.groups()[0].lower().strip()
found_interpreters = [interp.strip() for interp in match.groups()[1].splitlines() if interp.startswith('/')]
display.debug(u"found interpreters: {0}".format(found_interpreters), host=host)
if not found_interpreters:
if not is_silent:
action._discovery_warnings.append(u'No python interpreters found for '
u'host {0} (tried {1})'.format(host, bootstrap_python_list))
# this is lame, but returning None or throwing an exception is uglier
return u'/usr/bin/python'
if platform_type != 'linux':
raise NotImplementedError('unsupported platform for extended discovery: {0}'.format(to_native(platform_type)))
platform_script = pkgutil.get_data('ansible.executor.discovery', 'python_target.py')
# FUTURE: respect pipelining setting instead of just if the connection supports it?
if action._connection.has_pipelining:
res = action._low_level_execute_command(found_interpreters[0], sudoable=False, in_data=platform_script)
else:
# FUTURE: implement on-disk case (via script action or ?)
raise NotImplementedError('pipelining support required for extended interpreter discovery')
platform_info = json.loads(res.get('stdout'))
distro, version = _get_linux_distro(platform_info)
if not distro or not version:
raise NotImplementedError('unable to get Linux distribution/version info')
version_map = platform_python_map.get(distro.lower().strip())
if not version_map:
raise NotImplementedError('unsupported Linux distribution: {0}'.format(distro))
platform_interpreter = to_text(_version_fuzzy_match(version, version_map), errors='surrogate_or_strict')
# provide a transition period for hosts that were using /usr/bin/python previously (but shouldn't have been)
if is_auto_legacy:
if platform_interpreter != u'/usr/bin/python' and u'/usr/bin/python' in found_interpreters:
if not is_silent:
action._discovery_warnings.append(
u"Distribution {0} {1} on host {2} should use {3}, but is using "
u"/usr/bin/python for backward compatibility with prior Ansible releases. "
u"See {4} for more information"
.format(distro, version, host, platform_interpreter,
get_versioned_doclink('reference_appendices/interpreter_discovery.html')))
return u'/usr/bin/python'
if platform_interpreter not in found_interpreters:
if platform_interpreter not in bootstrap_python_list:
# sanity check to make sure we looked for it
if not is_silent:
action._discovery_warnings \
.append(u"Platform interpreter {0} on host {1} is missing from bootstrap list"
.format(platform_interpreter, host))
if not is_silent:
action._discovery_warnings \
.append(u"Distribution {0} {1} on host {2} should use {3}, but is using {4}, since the "
u"discovered platform python interpreter was not present. See {5} "
u"for more information."
.format(distro, version, host, platform_interpreter, found_interpreters[0],
get_versioned_doclink('reference_appendices/interpreter_discovery.html')))
return found_interpreters[0]
return platform_interpreter
except NotImplementedError as ex:
display.vvv(msg=u'Python interpreter discovery fallback ({0})'.format(to_text(ex)), host=host)
except Exception as ex:
if not is_silent:
display.warning(msg=u'Unhandled error in Python interpreter discovery for host {0}: {1}'.format(host, to_text(ex)))
display.debug(msg=u'Interpreter discovery traceback:\n{0}'.format(to_text(format_exc())), host=host)
if res and res.get('stderr'):
display.vvv(msg=u'Interpreter discovery remote stderr:\n{0}'.format(to_text(res.get('stderr'))), host=host)
if not is_silent:
action._discovery_warnings \
.append(u"Platform {0} on host {1} is using the discovered Python interpreter at {2}, but future installation of "
u"another Python interpreter could change the meaning of that path. See {3} "
u"for more information."
.format(platform_type, host, found_interpreters[0],
get_versioned_doclink('reference_appendices/interpreter_discovery.html')))
return found_interpreters[0]
def _get_linux_distro(platform_info):
dist_result = platform_info.get('platform_dist_result', [])
if len(dist_result) == 3 and any(dist_result):
return dist_result[0], dist_result[1]
osrelease_content = platform_info.get('osrelease_content')
if not osrelease_content:
return u'', u''
osr = LinuxDistribution._parse_os_release_content(osrelease_content)
return osr.get('id', u''), osr.get('version_id', u'')
def _version_fuzzy_match(version, version_map):
# try exact match first
res = version_map.get(version)
if res:
return res
sorted_looseversions = sorted([LooseVersion(v) for v in version_map.keys()])
find_looseversion = LooseVersion(version)
# slot match; return nearest previous version we're newer than
kpos = bisect.bisect(sorted_looseversions, find_looseversion)
if kpos == 0:
# older than everything in the list, return the oldest version
# TODO: warning-worthy?
return version_map.get(sorted_looseversions[0].vstring)
# TODO: is "past the end of the list" warning-worthy too (at least if it's not a major version match)?
# return the next-oldest entry that we're newer than...
return version_map.get(sorted_looseversions[kpos - 1].vstring)
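# Illustrative walk-through: with version_map {'6': '/usr/bin/python', '8': '/usr/libexec/platform-python'}
# and a discovered version of '7', there is no exact entry, bisect slots '7' after '6',
# and the function returns '/usr/bin/python'.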
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,017 |
user module can't handle password expiration parameters correctly
|
### Summary
When using the user module to modify password_expire_min=0, the processing result does not meet expectations.
I guess the problem is caused by the following code in the user module.
```
# deal with password expire min
if user.password_expire_min:
if user.user_exists():
(rc, out, err) = user.set_password_expire_min()
```
Since `0` is falsy in Python, the truthiness check skips the update whenever the requested value is `0`. Modifying to the following code may fix this problem.
```
# deal with password expire min
if user.password_expire_min is not None:
if user.user_exists():
(rc, out, err) = user.set_password_expire_min()
```
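A minimal illustration of the truthiness pitfall (not module code):
```python
password_expire_min = 0
if password_expire_min:               # 0 is falsy, so the expiry update is skipped
    print("would run chage -m 0")
if password_expire_min is not None:   # only skips when the option was never set
    print("runs chage -m 0")
```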
### Issue Type
Bug Report
### Component Name
user
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.1]
config file = None
configured module search path = ['/home/betadmin/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /app/python3/lib/python3.8/site-packages/ansible
ansible collection location = /home/betadmin/.ansible/collections:/usr/share/ansible/collections
executable location = /app/python3/bin/ansible
python version = 3.8.5 (default, Sep 18 2020, 14:55:53) [GCC 4.8.5 20150623 (Red Hat 4.8.5-16)]
jinja version = 2.11.2
libyaml = False
```
### Configuration
```console
$ ansible-config dump --only-changed
```
### OS / Environment
RHEL 7.4
### Steps to Reproduce
```yaml
- name: Edit password_expire_min
  user:
    name: test
    password_expire_min: 0
```
### Expected Results
Minimum number of days between password change : 0
### Actual Results
Minimum number of days between password change : 2
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75017
|
https://github.com/ansible/ansible/pull/75390
|
5a690239411567e125ae5da5d03494cc0acf6ebf
|
dbde2c2ae3b03469abbe8f2c98b50ffedcf7975f
| 2021-06-16T02:19:51Z |
python
| 2022-01-24T19:52:38Z |
changelogs/fragments/75017-user-password-expiry.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,017 |
user module can't handle password expiration parameters correctly
|
### Summary
When using the user module to modify password_expire_min=0, the processing result does not meet expectations.
I guess the problem is caused by the following code in the user module.
```
# deal with password expire min
if user.password_expire_min:
if user.user_exists():
(rc, out, err) = user.set_password_expire_min()
```
Modifying to the following code may fix this problem.
```
# deal with password expire min
if user.password_expire_min is not None:
if user.user_exists():
(rc, out, err) = user.set_password_expire_min()
```
### Issue Type
Bug Report
### Component Name
user
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.1]
config file = None
configured module search path = ['/home/betadmin/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /app/python3/lib/python3.8/site-packages/ansible
ansible collection location = /home/betadmin/.ansible/collections:/usr/share/ansible/collections
executable location = /app/python3/bin/ansible
python version = 3.8.5 (default, Sep 18 2020, 14:55:53) [GCC 4.8.5 20150623 (Red Hat 4.8.5-16)]
jinja version = 2.11.2
libyaml = False
```
### Configuration
```console
$ ansible-config dump --only-changed
```
### OS / Environment
RHEL 7.4
### Steps to Reproduce
```yaml
- name: Edit password_expire_min
  user:
    name: test
    password_expire_min: 0
```
### Expected Results
Minimum number of days between password change : 0
### Actual Results
Minimum number of days between password change : 2
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75017
|
https://github.com/ansible/ansible/pull/75390
|
5a690239411567e125ae5da5d03494cc0acf6ebf
|
dbde2c2ae3b03469abbe8f2c98b50ffedcf7975f
| 2021-06-16T02:19:51Z |
python
| 2022-01-24T19:52:38Z |
lib/ansible/modules/user.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Stephen Fromm <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
module: user
version_added: "0.2"
short_description: Manage user accounts
description:
- Manage user accounts and user attributes.
- For Windows targets, use the M(ansible.windows.win_user) module instead.
options:
name:
description:
- Name of the user to create, remove or modify.
type: str
required: true
aliases: [ user ]
uid:
description:
- Optionally sets the I(UID) of the user.
type: int
comment:
description:
- Optionally sets the description (aka I(GECOS)) of user account.
type: str
hidden:
description:
- macOS only, optionally hide the user from the login window and system preferences.
- The default will be C(yes) if the I(system) option is used.
type: bool
version_added: "2.6"
non_unique:
description:
- Optionally when used with the -u option, this option allows changing the user ID to a non-unique value.
type: bool
default: no
version_added: "1.1"
seuser:
description:
- Optionally sets the seuser type (user_u) on selinux enabled systems.
type: str
version_added: "2.1"
group:
description:
- Optionally sets the user's primary group (takes a group name).
type: str
groups:
description:
- List of groups user will be added to.
- By default, the user is removed from all other groups. Configure C(append) to modify this.
- When set to an empty string C(''),
the user is removed from all groups except the primary group.
- Before Ansible 2.3, the only input format allowed was a comma separated string.
type: list
elements: str
append:
description:
- If C(yes), add the user to the groups specified in C(groups).
- If C(no), user will only be added to the groups specified in C(groups),
removing them from all other groups.
type: bool
default: no
shell:
description:
- Optionally set the user's shell.
- On macOS, before Ansible 2.5, the default shell for non-system users was C(/usr/bin/false).
Since Ansible 2.5, the default shell for non-system users on macOS is C(/bin/bash).
- See notes for details on how other operating systems determine the default shell by
the underlying tool.
type: str
home:
description:
- Optionally set the user's home directory.
type: path
skeleton:
description:
- Optionally set a home skeleton directory.
- Requires C(create_home) option!
type: str
version_added: "2.0"
password:
description:
- Optionally set the user's password to this crypted value.
- On macOS systems, this value has to be cleartext. Beware of security issues.
- To create an account with a locked/disabled password on Linux systems, set this to C('!') or C('*').
- To create an account with a locked/disabled password on OpenBSD, set this to C('*************').
- See L(FAQ entry,https://docs.ansible.com/ansible/latest/reference_appendices/faq.html#how-do-i-generate-encrypted-passwords-for-the-user-module)
for details on various ways to generate these password values.
type: str
state:
description:
- Whether the account should exist or not, taking action if the state is different from what is stated.
type: str
choices: [ absent, present ]
default: present
create_home:
description:
- Unless set to C(no), a home directory will be made for the user
when the account is created or if the home directory does not exist.
- Changed from C(createhome) to C(create_home) in Ansible 2.5.
type: bool
default: yes
aliases: [ createhome ]
move_home:
description:
- "If set to C(yes) when used with C(home: ), attempt to move the user's old home
directory to the specified directory if it isn't there already and the old home exists."
type: bool
default: no
system:
description:
- When creating an account C(state=present), setting this to C(yes) makes the user a system account.
- This setting cannot be changed on existing users.
type: bool
default: no
force:
description:
- This only affects C(state=absent), it forces removal of the user and associated directories on supported platforms.
- The behavior is the same as C(userdel --force), check the man page for C(userdel) on your system for details and support.
- When used with C(generate_ssh_key=yes) this forces an existing key to be overwritten.
type: bool
default: no
remove:
description:
- This only affects C(state=absent), it attempts to remove directories associated with the user.
- The behavior is the same as C(userdel --remove), check the man page for details and support.
type: bool
default: no
login_class:
description:
- Optionally sets the user's login class, a feature of most BSD OSs.
type: str
generate_ssh_key:
description:
- Whether to generate an SSH key for the user in question.
- This will B(not) overwrite an existing SSH key unless used with C(force=yes).
type: bool
default: no
version_added: "0.9"
ssh_key_bits:
description:
- Optionally specify number of bits in SSH key to create.
type: int
default: default set by ssh-keygen
version_added: "0.9"
ssh_key_type:
description:
- Optionally specify the type of SSH key to generate.
- Available SSH key types will depend on implementation
present on target host.
type: str
default: rsa
version_added: "0.9"
ssh_key_file:
description:
- Optionally specify the SSH key filename.
- If this is a relative filename then it will be relative to the user's home directory.
- This parameter defaults to I(.ssh/id_rsa).
type: path
version_added: "0.9"
ssh_key_comment:
description:
- Optionally define the comment for the SSH key.
type: str
default: ansible-generated on $HOSTNAME
version_added: "0.9"
ssh_key_passphrase:
description:
- Set a passphrase for the SSH key.
- If no passphrase is provided, the SSH key will default to having no passphrase.
type: str
version_added: "0.9"
update_password:
description:
- C(always) will update passwords if they differ.
- C(on_create) will only set the password for newly created users.
type: str
choices: [ always, on_create ]
default: always
version_added: "1.3"
expires:
description:
- An expiry time for the user in epoch; it will be ignored on platforms that do not support this.
- Currently supported on GNU/Linux, FreeBSD, and DragonFlyBSD.
- Since Ansible 2.6 you can remove the expiry time by specifying a negative value.
Currently supported on GNU/Linux and FreeBSD.
type: float
version_added: "1.9"
password_lock:
description:
- Lock the password (C(usermod -L), C(usermod -U), C(pw lock)).
- Implementation differs by platform. This option does not always mean the user cannot log in using other methods.
- This option does not disable the user, only lock the password.
- This must be set to C(False) in order to unlock a currently locked password. The absence of this parameter will not unlock a password.
- Currently supported on Linux, FreeBSD, DragonFlyBSD, NetBSD, OpenBSD.
type: bool
version_added: "2.6"
local:
description:
- Forces the use of "local" command alternatives on platforms that implement it.
- This is useful in environments that use centralized authentication when you want to manipulate the local users
(in other words, it uses C(luseradd) instead of C(useradd)).
- This will check C(/etc/passwd) for an existing account before invoking commands. If the local account database
exists somewhere other than C(/etc/passwd), this setting will not work properly.
- This requires that the above commands as well as C(/etc/passwd) must exist on the target host, otherwise it will be a fatal error.
type: bool
default: no
version_added: "2.4"
profile:
description:
- Sets the profile of the user.
- Does nothing when used with other platforms.
- Can set multiple profiles using comma separation.
- To delete all the profiles, use C(profile='').
- Currently supported on Illumos/Solaris.
type: str
version_added: "2.8"
authorization:
description:
- Sets the authorization of the user.
- Does nothing when used with other platforms.
- Can set multiple authorizations using comma separation.
- To delete all authorizations, use C(authorization='').
- Currently supported on Illumos/Solaris.
type: str
version_added: "2.8"
role:
description:
- Sets the role of the user.
- Does nothing when used with other platforms.
- Can set multiple roles using comma separation.
- To delete all roles, use C(role='').
- Currently supported on Illumos/Solaris.
type: str
version_added: "2.8"
password_expire_max:
description:
- Maximum number of days between password change.
- Supported on Linux only.
type: int
version_added: "2.11"
password_expire_min:
description:
- Minimum number of days between password change.
- Supported on Linux only.
type: int
version_added: "2.11"
umask:
description:
- Sets the umask of the user.
- Does nothing when used with other platforms.
- Currently supported on Linux.
- Requires C(local) is omitted or False.
type: str
version_added: "2.12"
extends_documentation_fragment: action_common_attributes
attributes:
check_mode:
support: full
diff_mode:
support: none
platform:
platforms: posix
notes:
- There are specific requirements per platform on user management utilities. However
they generally come pre-installed with the system and Ansible will require they
are present at runtime. If they are not, a descriptive error message will be shown.
- On SunOS platforms, the shadow file is backed up automatically since this module edits it directly.
On other platforms, the shadow file is backed up by the underlying tools used by this module.
- On macOS, this module uses C(dscl) to create, modify, and delete accounts. C(dseditgroup) is used to
modify group membership. Accounts are hidden from the login window by modifying
C(/Library/Preferences/com.apple.loginwindow.plist).
- On FreeBSD, this module uses C(pw useradd) and C(chpass) to create, C(pw usermod) and C(chpass) to modify,
C(pw userdel) remove, C(pw lock) to lock, and C(pw unlock) to unlock accounts.
- On all other platforms, this module uses C(useradd) to create, C(usermod) to modify, and
C(userdel) to remove accounts.
seealso:
- module: ansible.posix.authorized_key
- module: ansible.builtin.group
- module: ansible.windows.win_user
author:
- Stephen Fromm (@sfromm)
'''
EXAMPLES = r'''
- name: Add the user 'johnd' with a specific uid and a primary group of 'admin'
ansible.builtin.user:
name: johnd
comment: John Doe
uid: 1040
group: admin
- name: Add the user 'james' with a bash shell, appending the group 'admins' and 'developers' to the user's groups
ansible.builtin.user:
name: james
shell: /bin/bash
groups: admins,developers
append: yes
- name: Remove the user 'johnd'
ansible.builtin.user:
name: johnd
state: absent
remove: yes
- name: Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa
ansible.builtin.user:
name: jsmith
generate_ssh_key: yes
ssh_key_bits: 2048
ssh_key_file: .ssh/id_rsa
- name: Added a consultant whose account you want to expire
ansible.builtin.user:
name: james18
shell: /bin/zsh
groups: developers
expires: 1422403387
- name: Starting at Ansible 2.6, modify user, remove expiry time
ansible.builtin.user:
name: james18
expires: -1
- name: Set maximum expiration date for password
user:
name: ram19
password_expire_max: 10
- name: Set minimum expiration date for password
user:
name: pushkar15
password_expire_min: 5
'''
RETURN = r'''
append:
description: Whether or not to append the user to groups.
returned: When state is C(present) and the user exists
type: bool
sample: True
comment:
description: Comment section from passwd file, usually the user name.
returned: When user exists
type: str
sample: Agent Smith
create_home:
description: Whether or not to create the home directory.
returned: When user does not exist and not check mode
type: bool
sample: True
force:
description: Whether or not a user account was forcibly deleted.
returned: When I(state) is C(absent) and user exists
type: bool
sample: False
group:
description: Primary user group ID
returned: When user exists
type: int
sample: 1001
groups:
description: List of groups of which the user is a member.
returned: When I(groups) is not empty and I(state) is C(present)
type: str
sample: 'chrony,apache'
home:
description: "Path to user's home directory."
returned: When I(state) is C(present)
type: str
sample: '/home/asmith'
move_home:
description: Whether or not to move an existing home directory.
returned: When I(state) is C(present) and user exists
type: bool
sample: False
name:
description: User account name.
returned: always
type: str
sample: asmith
password:
description: Masked value of the password.
returned: When I(state) is C(present) and I(password) is not empty
type: str
sample: 'NOT_LOGGING_PASSWORD'
remove:
description: Whether or not to remove the user account.
returned: When I(state) is C(absent) and user exists
type: bool
sample: True
shell:
description: User login shell.
returned: When I(state) is C(present)
type: str
sample: '/bin/bash'
ssh_fingerprint:
description: Fingerprint of generated SSH key.
returned: When I(generate_ssh_key) is C(True)
type: str
sample: '2048 SHA256:aYNHYcyVm87Igh0IMEDMbvW0QDlRQfE0aJugp684ko8 ansible-generated on host (RSA)'
ssh_key_file:
description: Path to generated SSH private key file.
returned: When I(generate_ssh_key) is C(True)
type: str
sample: /home/asmith/.ssh/id_rsa
ssh_public_key:
description: Generated SSH public key file.
returned: When I(generate_ssh_key) is C(True)
type: str
sample: >
'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC95opt4SPEC06tOYsJQJIuN23BbLMGmYo8ysVZQc4h2DZE9ugbjWWGS1/pweUGjVstgzMkBEeBCByaEf/RJKNecKRPeGd2Bw9DCj/bn5Z6rGfNENKBmo
618mUJBvdlEgea96QGjOwSB7/gmonduC7gsWDMNcOdSE3wJMTim4lddiBx4RgC9yXsJ6Tkz9BHD73MXPpT5ETnse+A3fw3IGVSjaueVnlUyUmOBf7fzmZbhlFVXf2Zi2rFTXqvbdGHKkzpw1U8eB8xFPP7y
d5u1u0e6Acju/8aZ/l17IDFiLke5IzlqIMRTEbDwLNeO84YQKWTm9fODHzhYe0yvxqLiK07 ansible-generated on host'
stderr:
description: Standard error from running commands.
returned: When stderr is returned by a command that is run
type: str
sample: Group wheels does not exist
stdout:
description: Standard output from running commands.
returned: When standard output is returned by the command that is run
type: str
sample:
system:
description: Whether or not the account is a system account.
returned: When I(system) is passed to the module and the account does not exist
type: bool
sample: True
uid:
description: User ID of the user account.
returned: When I(uid) is passed to the module
type: int
sample: 1044
password_expire_max:
description: Maximum number of days during which a password is valid.
returned: When user exists
type: int
sample: 20
password_expire_min:
description: Minimum number of days between password change.
returned: When user exists
type: int
sample: 20
'''
import errno
import grp
import calendar
import os
import re
import pty
import pwd
import select
import shutil
import socket
import subprocess
import time
import math
from ansible.module_utils import distro
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.locale import get_best_parsable_locale
from ansible.module_utils.common.sys_info import get_platform_subclass
try:
import spwd
HAVE_SPWD = True
except ImportError:
HAVE_SPWD = False
_HASH_RE = re.compile(r'[^a-zA-Z0-9./=]')
class User(object):
"""
This is a generic User manipulation class that is subclassed
based on platform.
A subclass may wish to override the following action methods:-
- create_user()
- remove_user()
- modify_user()
- ssh_key_gen()
- ssh_key_fingerprint()
- user_exists()
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None
PASSWORDFILE = '/etc/passwd'
SHADOWFILE = '/etc/shadow'
SHADOWFILE_EXPIRE_INDEX = 7
LOGIN_DEFS = '/etc/login.defs'
DATE_FORMAT = '%Y-%m-%d'
def __new__(cls, *args, **kwargs):
new_cls = get_platform_subclass(User)
return super(cls, new_cls).__new__(new_cls)
def __init__(self, module):
self.module = module
self.state = module.params['state']
self.name = module.params['name']
self.uid = module.params['uid']
self.hidden = module.params['hidden']
self.non_unique = module.params['non_unique']
self.seuser = module.params['seuser']
self.group = module.params['group']
self.comment = module.params['comment']
self.shell = module.params['shell']
self.password = module.params['password']
self.force = module.params['force']
self.remove = module.params['remove']
self.create_home = module.params['create_home']
self.move_home = module.params['move_home']
self.skeleton = module.params['skeleton']
self.system = module.params['system']
self.login_class = module.params['login_class']
self.append = module.params['append']
self.sshkeygen = module.params['generate_ssh_key']
self.ssh_bits = module.params['ssh_key_bits']
self.ssh_type = module.params['ssh_key_type']
self.ssh_comment = module.params['ssh_key_comment']
self.ssh_passphrase = module.params['ssh_key_passphrase']
self.update_password = module.params['update_password']
self.home = module.params['home']
self.expires = None
self.password_lock = module.params['password_lock']
self.groups = None
self.local = module.params['local']
self.profile = module.params['profile']
self.authorization = module.params['authorization']
self.role = module.params['role']
self.password_expire_max = module.params['password_expire_max']
self.password_expire_min = module.params['password_expire_min']
self.umask = module.params['umask']
if self.umask is not None and self.local:
module.fail_json(msg="'umask' can not be used with 'local'")
if module.params['groups'] is not None:
self.groups = ','.join(module.params['groups'])
if module.params['expires'] is not None:
try:
self.expires = time.gmtime(module.params['expires'])
except Exception as e:
module.fail_json(msg="Invalid value for 'expires' %s: %s" % (self.expires, to_native(e)))
if module.params['ssh_key_file'] is not None:
self.ssh_file = module.params['ssh_key_file']
else:
self.ssh_file = os.path.join('.ssh', 'id_%s' % self.ssh_type)
if self.groups is None and self.append:
# Change the argument_spec in 2.14 and remove this warning
# required_by={'append': ['groups']}
module.warn("'append' is set, but no 'groups' are specified. Use 'groups' for appending new groups."
"This will change to an error in Ansible 2.14.")
def check_password_encrypted(self):
# Darwin needs cleartext password, so skip validation
if self.module.params['password'] and self.platform != 'Darwin':
maybe_invalid = False
# Allow setting certain passwords in order to disable the account
if self.module.params['password'] in set(['*', '!', '*************']):
maybe_invalid = False
else:
# : for delimiter, * for disable user, ! for lock user
# these characters are invalid in the password
if any(char in self.module.params['password'] for char in ':*!'):
maybe_invalid = True
if '$' not in self.module.params['password']:
maybe_invalid = True
else:
fields = self.module.params['password'].split("$")
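# crypt(3)-style values look like $<id>$<salt>$<digest>, e.g. a SHA-512 hash is
# $6$<salt>$<86-char digest>; the length checks below follow that format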
if len(fields) >= 3:
# contains character outside the crypto constraint
if bool(_HASH_RE.search(fields[-1])):
maybe_invalid = True
# md5
if fields[1] == '1' and len(fields[-1]) != 22:
maybe_invalid = True
# sha256
if fields[1] == '5' and len(fields[-1]) != 43:
maybe_invalid = True
# sha512
if fields[1] == '6' and len(fields[-1]) != 86:
maybe_invalid = True
else:
maybe_invalid = True
if maybe_invalid:
self.module.warn("The input password appears not to have been hashed. "
"The 'password' argument must be encrypted for this module to work properly.")
def execute_command(self, cmd, use_unsafe_shell=False, data=None, obey_checkmode=True):
if self.module.check_mode and obey_checkmode:
self.module.debug('In check mode, would have run: "%s"' % cmd)
return (0, '', '')
else:
# cast all args to strings ansible-modules-core/issues/4397
cmd = [str(x) for x in cmd]
return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
def backup_shadow(self):
if not self.module.check_mode and self.SHADOWFILE:
return self.module.backup_local(self.SHADOWFILE)
def remove_user_userdel(self):
if self.local:
command_name = 'luserdel'
else:
command_name = 'userdel'
cmd = [self.module.get_bin_path(command_name, True)]
if self.force and not self.local:
cmd.append('-f')
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user_useradd(self):
if self.local:
command_name = 'luseradd'
lgroupmod_cmd = self.module.get_bin_path('lgroupmod', True)
lchage_cmd = self.module.get_bin_path('lchage', True)
else:
command_name = 'useradd'
cmd = [self.module.get_bin_path(command_name, True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.seuser is not None:
cmd.append('-Z')
cmd.append(self.seuser)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
elif self.group_exists(self.name):
# use the -N option (no user group) if a group already
# exists with the same name as the user to prevent
# errors from useradd trying to create a group when
# USERGROUPS_ENAB is set in /etc/login.defs.
if os.path.exists('/etc/redhat-release'):
dist = distro.version()
major_release = int(dist.split('.')[0])
if major_release <= 5 or self.local:
cmd.append('-n')
else:
cmd.append('-N')
elif os.path.exists('/etc/SuSE-release'):
# -N did not exist in useradd before SLE 11 and did not
# automatically create a group
dist = distro.version()
major_release = int(dist.split('.')[0])
if major_release >= 12:
cmd.append('-N')
else:
cmd.append('-N')
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
if not self.local:
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
# If the specified path to the user home contains parent directories that
# do not exist and create_home is True first create the parent directory
# since useradd cannot create it.
if self.create_home:
parent = os.path.dirname(self.home)
if not os.path.isdir(parent):
self.create_homedir(self.home)
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.expires is not None and not self.local:
cmd.append('-e')
if self.expires < time.gmtime(0):
cmd.append('')
else:
cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
if self.password is not None:
cmd.append('-p')
if self.password_lock:
cmd.append('!%s' % self.password)
else:
cmd.append(self.password)
if self.create_home:
if not self.local:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.umask is not None:
cmd.append('-K')
cmd.append('UMASK=' + self.umask)
else:
cmd.append('-M')
if self.system:
cmd.append('-r')
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
if not self.local or rc != 0:
return (rc, out, err)
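# luseradd does not handle expiration or supplementary groups itself, so apply them with lchage and lgroupmod once the account exists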
if self.expires is not None:
if self.expires < time.gmtime(0):
lexpires = -1
else:
# Convert seconds since Epoch to days since Epoch
lexpires = int(math.floor(self.module.params['expires'])) // 86400
(rc, _out, _err) = self.execute_command([lchage_cmd, '-E', to_native(lexpires), self.name])
out += _out
err += _err
if rc != 0:
return (rc, out, err)
if self.groups is None or len(self.groups) == 0:
return (rc, out, err)
for add_group in groups:
(rc, _out, _err) = self.execute_command([lgroupmod_cmd, '-M', self.name, add_group])
out += _out
err += _err
if rc != 0:
return (rc, out, err)
return (rc, out, err)
def _check_usermod_append(self):
# check if this version of usermod can append groups
if self.local:
command_name = 'lusermod'
else:
command_name = 'usermod'
usermod_path = self.module.get_bin_path(command_name, True)
# for some reason, usermod --help cannot be used by non root
# on RH/Fedora, due to lack of execute bit for others
if not os.access(usermod_path, os.X_OK):
return False
cmd = [usermod_path, '--help']
(rc, data1, data2) = self.execute_command(cmd, obey_checkmode=False)
helpout = data1 + data2
# check if --append exists
lines = to_native(helpout).split('\n')
for line in lines:
if line.strip().startswith('-a, --append'):
return True
return False
def modify_user_usermod(self):
if self.local:
command_name = 'lusermod'
lgroupmod_cmd = self.module.get_bin_path('lgroupmod', True)
lgroupmod_add = set()
lgroupmod_del = set()
lchage_cmd = self.module.get_bin_path('lchage', True)
lexpires = None
else:
command_name = 'usermod'
cmd = [self.module.get_bin_path(command_name, True)]
info = self.user_info()
has_append = self._check_usermod_append()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
# get a list of all groups for the user, including the primary
current_groups = self.user_group_membership(exclude_primary=False)
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set(remove_existing=False)
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
if has_append:
cmd.append('-a')
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
if self.local:
if self.append:
lgroupmod_add = set(groups).difference(current_groups)
lgroupmod_del = set()
else:
lgroupmod_add = set(groups).difference(current_groups)
lgroupmod_del = set(current_groups).difference(groups)
else:
if self.append and not has_append:
cmd.append('-A')
cmd.append(','.join(group_diff))
else:
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
cmd.append('-d')
cmd.append(self.home)
if self.move_home:
cmd.append('-m')
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.expires is not None:
current_expires = int(self.user_password()[1])
if self.expires < time.gmtime(0):
if current_expires >= 0:
if self.local:
lexpires = -1
else:
cmd.append('-e')
cmd.append('')
else:
# Convert days since Epoch to seconds since Epoch as struct_time
current_expire_date = time.gmtime(current_expires * 86400)
# Update if the current expiry is negative or the year, month, or day differ
if current_expires < 0 or current_expire_date[:3] != self.expires[:3]:
if self.local:
# Convert seconds since Epoch to days since Epoch
lexpires = int(math.floor(self.module.params['expires'])) // 86400
else:
cmd.append('-e')
cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
# Lock if no password or unlocked, unlock only if locked
if self.password_lock and not info[1].startswith('!'):
cmd.append('-L')
elif self.password_lock is False and info[1].startswith('!'):
# usermod will refuse to unlock a user with no password, module shows 'changed' regardless
cmd.append('-U')
if self.update_password == 'always' and self.password is not None and info[1].lstrip('!') != self.password.lstrip('!'):
# Remove options that are mutually exclusive with -p
cmd = [c for c in cmd if c not in ['-U', '-L']]
cmd.append('-p')
if self.password_lock:
# Lock the account and set the hash in a single command
cmd.append('!%s' % self.password)
else:
cmd.append(self.password)
(rc, out, err) = (None, '', '')
# skip if no usermod changes to be made
if len(cmd) > 1:
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
if not self.local or not (rc is None or rc == 0):
return (rc, out, err)
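# for local accounts, expiration and group membership changes go through lchage and lgroupmod rather than lusermod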
if lexpires is not None:
(rc, _out, _err) = self.execute_command([lchage_cmd, '-E', to_native(lexpires), self.name])
out += _out
err += _err
if rc != 0:
return (rc, out, err)
if len(lgroupmod_add) == 0 and len(lgroupmod_del) == 0:
return (rc, out, err)
for add_group in lgroupmod_add:
(rc, _out, _err) = self.execute_command([lgroupmod_cmd, '-M', self.name, add_group])
out += _out
err += _err
if rc != 0:
return (rc, out, err)
for del_group in lgroupmod_del:
(rc, _out, _err) = self.execute_command([lgroupmod_cmd, '-m', self.name, del_group])
out += _out
err += _err
if rc != 0:
return (rc, out, err)
return (rc, out, err)
def group_exists(self, group):
try:
# Try group as a gid first
grp.getgrgid(int(group))
return True
except (ValueError, KeyError):
try:
grp.getgrnam(group)
return True
except KeyError:
return False
def group_info(self, group):
if not self.group_exists(group):
return False
try:
# Try group as a gid first
return list(grp.getgrgid(int(group)))
except (ValueError, KeyError):
return list(grp.getgrnam(group))
def get_groups_set(self, remove_existing=True):
if self.groups is None:
return None
info = self.user_info()
groups = set(x.strip() for x in self.groups.split(',') if x)
for g in groups.copy():
if not self.group_exists(g):
self.module.fail_json(msg="Group %s does not exist" % (g))
if info and remove_existing and self.group_info(g)[2] == info[3]:
groups.remove(g)
return groups
def user_group_membership(self, exclude_primary=True):
''' Return a list of groups the user belongs to '''
groups = []
info = self.get_pwd_info()
for group in grp.getgrall():
if self.name in group.gr_mem:
# Exclude the user's primary group by default
if not exclude_primary:
groups.append(group[0])
else:
if info[3] != group.gr_gid:
groups.append(group[0])
return groups
def user_exists(self):
# The pwd module does not distinguish between local and directory accounts.
# Its output cannot be used to determine whether or not an account exists locally.
# It returns True if the account exists locally or in the directory, so instead
# look in the local PASSWORD file for an existing account.
if self.local:
if not os.path.exists(self.PASSWORDFILE):
self.module.fail_json(msg="'local: true' specified but unable to find local account file {0} to parse.".format(self.PASSWORDFILE))
exists = False
name_test = '{0}:'.format(self.name)
with open(self.PASSWORDFILE, 'rb') as f:
reversed_lines = f.readlines()[::-1]
for line in reversed_lines:
if line.startswith(to_bytes(name_test)):
exists = True
break
if not exists:
self.module.warn(
"'local: true' specified and user '{name}' was not found in {file}. "
"The local user account may already exist if the local account database exists "
"somewhere other than {file}.".format(file=self.PASSWORDFILE, name=self.name))
return exists
else:
try:
if pwd.getpwnam(self.name):
return True
except KeyError:
return False
def get_pwd_info(self):
if not self.user_exists():
return False
return list(pwd.getpwnam(self.name))
def user_info(self):
if not self.user_exists():
return False
info = self.get_pwd_info()
if len(info[1]) == 1 or len(info[1]) == 0:
info[1] = self.user_password()[0]
return info
def set_password_expire_max(self):
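# chage -M sets the maximum password age in days; skip the call when the shadow entry already matches to stay idempotent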
command_name = 'chage'
cmd = [self.module.get_bin_path(command_name, True)]
cmd.append('-M')
cmd.append(self.password_expire_max)
cmd.append(self.name)
if self.password_expire_max == spwd.getspnam(self.name).sp_max:
self.module.exit_json(changed=False)
else:
self.execute_command(cmd)
self.module.exit_json(changed=True)
def set_password_expire_min(self):
command_name = 'chage'
cmd = [self.module.get_bin_path(command_name, True)]
cmd.append('-m')
cmd.append(self.password_expire_min)
cmd.append(self.name)
if self.password_expire_min == spwd.getspnam(self.name).sp_min:
self.module.exit_json(changed=False)
else:
self.execute_command(cmd)
self.module.exit_json(changed=True)
def user_password(self):
passwd = ''
expires = ''
if HAVE_SPWD:
try:
passwd = spwd.getspnam(self.name)[1]
expires = spwd.getspnam(self.name)[7]
return passwd, expires
except KeyError:
return passwd, expires
except OSError as e:
# Python 3.6 raises PermissionError instead of KeyError
# Due to absence of PermissionError in python2.7 need to check
# errno
if e.errno in (errno.EACCES, errno.EPERM, errno.ENOENT):
return passwd, expires
raise
if not self.user_exists():
return passwd, expires
elif self.SHADOWFILE:
passwd, expires = self.parse_shadow_file()
return passwd, expires
def parse_shadow_file(self):
passwd = ''
expires = ''
if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
with open(self.SHADOWFILE, 'r') as f:
for line in f:
if line.startswith('%s:' % self.name):
passwd = line.split(':')[1]
expires = line.split(':')[self.SHADOWFILE_EXPIRE_INDEX] or -1
return passwd, expires
def get_ssh_key_path(self):
info = self.user_info()
if os.path.isabs(self.ssh_file):
ssh_key_file = self.ssh_file
else:
if not os.path.exists(info[5]) and not self.module.check_mode:
raise Exception('User %s home directory does not exist' % self.name)
ssh_key_file = os.path.join(info[5], self.ssh_file)
return ssh_key_file
def ssh_key_gen(self):
info = self.user_info()
overwrite = None
try:
ssh_key_file = self.get_ssh_key_path()
except Exception as e:
return (1, '', to_native(e))
ssh_dir = os.path.dirname(ssh_key_file)
if not os.path.exists(ssh_dir):
if self.module.check_mode:
return (0, '', '')
try:
os.mkdir(ssh_dir, int('0700', 8))
os.chown(ssh_dir, info[2], info[3])
except OSError as e:
return (1, '', 'Failed to create %s: %s' % (ssh_dir, to_native(e)))
if os.path.exists(ssh_key_file):
if self.force:
# ssh-keygen has no option to force overwriting an existing key, so answer the interactive prompt with 'y'
overwrite = 'y'
else:
return (None, 'Key already exists, use "force: yes" to overwrite', '')
cmd = [self.module.get_bin_path('ssh-keygen', True)]
cmd.append('-t')
cmd.append(self.ssh_type)
if self.ssh_bits > 0:
cmd.append('-b')
cmd.append(self.ssh_bits)
cmd.append('-C')
cmd.append(self.ssh_comment)
cmd.append('-f')
cmd.append(ssh_key_file)
if self.ssh_passphrase is not None:
if self.module.check_mode:
self.module.debug('In check mode, would have run: "%s"' % cmd)
return (0, '', '')
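# ssh-keygen reads the passphrase from a terminal rather than stdin, so allocate pseudo-terminals and answer its prompts programmatically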
master_in_fd, slave_in_fd = pty.openpty()
master_out_fd, slave_out_fd = pty.openpty()
master_err_fd, slave_err_fd = pty.openpty()
env = os.environ.copy()
env['LC_ALL'] = get_best_parsable_locale(self.module)
try:
p = subprocess.Popen([to_bytes(c) for c in cmd],
stdin=slave_in_fd,
stdout=slave_out_fd,
stderr=slave_err_fd,
preexec_fn=os.setsid,
env=env)
out_buffer = b''
err_buffer = b''
while p.poll() is None:
r, w, e = select.select([master_out_fd, master_err_fd], [], [], 1)
first_prompt = b'Enter passphrase (empty for no passphrase):'
second_prompt = b'Enter same passphrase again'
prompt = first_prompt
for fd in r:
if fd == master_out_fd:
chunk = os.read(master_out_fd, 10240)
out_buffer += chunk
if prompt in out_buffer:
os.write(master_in_fd, to_bytes(self.ssh_passphrase, errors='strict') + b'\r')
prompt = second_prompt
else:
chunk = os.read(master_err_fd, 10240)
err_buffer += chunk
if prompt in err_buffer:
os.write(master_in_fd, to_bytes(self.ssh_passphrase, errors='strict') + b'\r')
prompt = second_prompt
if b'Overwrite (y/n)?' in out_buffer or b'Overwrite (y/n)?' in err_buffer:
# The key was created between us checking for existence and now
return (None, 'Key already exists', '')
rc = p.returncode
out = to_native(out_buffer)
err = to_native(err_buffer)
except OSError as e:
return (1, '', to_native(e))
else:
cmd.append('-N')
cmd.append('')
(rc, out, err) = self.execute_command(cmd, data=overwrite)
if rc == 0 and not self.module.check_mode:
# If the keys were successfully created, we should be able
# to tweak ownership.
os.chown(ssh_key_file, info[2], info[3])
os.chown('%s.pub' % ssh_key_file, info[2], info[3])
return (rc, out, err)
def ssh_key_fingerprint(self):
ssh_key_file = self.get_ssh_key_path()
if not os.path.exists(ssh_key_file):
return (1, 'SSH Key file %s does not exist' % ssh_key_file, '')
cmd = [self.module.get_bin_path('ssh-keygen', True)]
cmd.append('-l')
cmd.append('-f')
cmd.append(ssh_key_file)
return self.execute_command(cmd, obey_checkmode=False)
def get_ssh_public_key(self):
ssh_public_key_file = '%s.pub' % self.get_ssh_key_path()
try:
with open(ssh_public_key_file, 'r') as f:
ssh_public_key = f.read().strip()
except IOError:
return None
return ssh_public_key
def create_user(self):
# by default we use the create_user_useradd method
return self.create_user_useradd()
def remove_user(self):
# by default we use the remove_user_userdel method
return self.remove_user_userdel()
def modify_user(self):
# by default we use the modify_user_usermod method
return self.modify_user_usermod()
def create_homedir(self, path):
if not os.path.exists(path):
if self.skeleton is not None:
skeleton = self.skeleton
else:
skeleton = '/etc/skel'
if os.path.exists(skeleton):
try:
shutil.copytree(skeleton, path, symlinks=True)
except OSError as e:
self.module.exit_json(failed=True, msg="%s" % to_native(e))
else:
try:
os.makedirs(path)
except OSError as e:
self.module.exit_json(failed=True, msg="%s" % to_native(e))
# get umask from /etc/login.defs and set correct home mode
if os.path.exists(self.LOGIN_DEFS):
with open(self.LOGIN_DEFS, 'r') as f:
for line in f:
m = re.match(r'^UMASK\s+(\d+)$', line)
if m:
umask = int(m.group(1), 8)
mode = 0o777 & ~umask
try:
os.chmod(path, mode)
except OSError as e:
self.module.exit_json(failed=True, msg="%s" % to_native(e))
def chown_homedir(self, uid, gid, path):
try:
os.chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for d in dirs:
os.chown(os.path.join(root, d), uid, gid)
for f in files:
os.chown(os.path.join(root, f), uid, gid)
except OSError as e:
self.module.exit_json(failed=True, msg="%s" % to_native(e))
# ===========================================
class FreeBsdUser(User):
"""
This is a FreeBSD User manipulation class - it uses the pw command
to manipulate the user database, followed by the chpass command
to change the password.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'FreeBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
SHADOWFILE_EXPIRE_INDEX = 6
DATE_FORMAT = '%d-%b-%Y'
def _handle_lock(self):
info = self.user_info()
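# pw lock marks an account by prefixing the password hash with '*LOCKED*'; toggle that state to match password_lock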
if self.password_lock and not info[1].startswith('*LOCKED*'):
cmd = [
self.module.get_bin_path('pw', True),
'lock',
self.name
]
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
return self.execute_command(cmd)
elif self.password_lock is False and info[1].startswith('*LOCKED*'):
cmd = [
self.module.get_bin_path('pw', True),
'unlock',
self.name
]
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
return self.execute_command(cmd)
return (None, '', '')
def remove_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'userdel',
'-n',
self.name
]
if self.remove:
cmd.append('-r')
return self.execute_command(cmd)
def create_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'useradd',
'-n',
self.name,
]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.umask is not None:
cmd.append('-K')
cmd.append('UMASK=' + self.umask)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.expires is not None:
cmd.append('-e')
if self.expires < time.gmtime(0):
cmd.append('0')
else:
cmd.append(str(calendar.timegm(self.expires)))
# system cannot be handled currently - should we error if it's requested?
# create the user
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
# we have to set the password in a second command
if self.password is not None:
cmd = [
self.module.get_bin_path('chpass', True),
'-p',
self.password,
self.name
]
_rc, _out, _err = self.execute_command(cmd)
if rc is None:
rc = _rc
out += _out
err += _err
# we have to lock/unlock the password in a distinct command
_rc, _out, _err = self._handle_lock()
if rc is None:
rc = _rc
out += _out
err += _err
return (rc, out, err)
def modify_user(self):
cmd = [
self.module.get_bin_path('pw', True),
'usermod',
'-n',
self.name
]
cmd_len = len(cmd)
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
if (info[5] != self.home and self.move_home) or (not os.path.exists(self.home) and self.create_home):
cmd.append('-m')
if info[5] != self.home:
cmd.append('-d')
cmd.append(self.home)
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.umask is not None:
cmd.append('-K')
cmd.append('UMASK=' + self.umask)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
# find current login class
user_login_class = None
if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
with open(self.SHADOWFILE, 'r') as f:
for line in f:
if line.startswith('%s:' % self.name):
user_login_class = line.split(':')[4]
# act only if login_class change
if self.login_class != user_login_class:
cmd.append('-L')
cmd.append(self.login_class)
if self.groups is not None:
current_groups = self.user_group_membership()
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
groups_need_mod = False
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
new_groups = groups
if self.append:
new_groups = groups | set(current_groups)
cmd.append(','.join(new_groups))
if self.expires is not None:
current_expires = int(self.user_password()[1])
# If expiration is negative or zero and the current expiration is greater than zero, disable expiration.
# In OpenBSD, setting expiration to zero disables expiration. It does not expire the account.
if self.expires <= time.gmtime(0):
if current_expires > 0:
cmd.append('-e')
cmd.append('0')
else:
# Convert days since Epoch to seconds since Epoch as struct_time
current_expire_date = time.gmtime(current_expires)
# Update if the current expiry is negative or the year, month, or day differ
if current_expires <= 0 or current_expire_date[:3] != self.expires[:3]:
cmd.append('-e')
cmd.append(str(calendar.timegm(self.expires)))
(rc, out, err) = (None, '', '')
# modify the user if cmd will do anything
if cmd_len != len(cmd):
(rc, _out, _err) = self.execute_command(cmd)
out += _out
err += _err
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
# we have to set the password in a second command
if self.update_password == 'always' and self.password is not None and info[1].lstrip('*LOCKED*') != self.password.lstrip('*LOCKED*'):
cmd = [
self.module.get_bin_path('chpass', True),
'-p',
self.password,
self.name
]
_rc, _out, _err = self.execute_command(cmd)
if rc is None:
rc = _rc
out += _out
err += _err
# we have to lock/unlock the password in a distinct command
_rc, _out, _err = self._handle_lock()
if rc is None:
rc = _rc
out += _out
err += _err
return (rc, out, err)
class DragonFlyBsdUser(FreeBsdUser):
"""
This is a DragonFlyBSD User manipulation class - it inherits the
FreeBsdUser class behaviors, such as using the pw command to
manipulate the user database, followed by the chpass command
to change the password.
"""
platform = 'DragonFly'
class OpenBSDUser(User):
"""
This is an OpenBSD User manipulation class.
Main differences are that OpenBSD:-
- has no concept of "system" account.
- has no force delete user
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'OpenBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.password is not None and self.password != '*':
cmd.append('-p')
cmd.append(self.password)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.umask is not None:
cmd.append('-K')
cmd.append('UMASK=' + self.umask)
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user_userdel(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups_option = '-S'
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_option = '-G'
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append(groups_option)
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
# find current login class
user_login_class = None
userinfo_cmd = [self.module.get_bin_path('userinfo', True), self.name]
(rc, out, err) = self.execute_command(userinfo_cmd, obey_checkmode=False)
for line in out.splitlines():
tokens = line.split()
if tokens[0] == 'class' and len(tokens) == 2:
user_login_class = tokens[1]
# act only if login_class change
if self.login_class != user_login_class:
cmd.append('-L')
cmd.append(self.login_class)
if self.password_lock and not info[1].startswith('*'):
cmd.append('-Z')
elif self.password_lock is False and info[1].startswith('*'):
cmd.append('-U')
if self.update_password == 'always' and self.password is not None \
and self.password != '*' and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
class NetBSDUser(User):
"""
This is a NetBSD User manipulation class.
Main differences are that NetBSD:-
- has no concept of "system" account.
- has no force delete user
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'NetBSD'
distribution = None
SHADOWFILE = '/etc/master.passwd'
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
if len(groups) > 16:
self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.umask is not None:
cmd.append('-K')
cmd.append('UMASK=' + self.umask)
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user_userdel(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups = set(current_groups).union(groups)
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
if len(groups) > 16:
self.module.fail_json(msg="Too many groups (%d) NetBSD allows for 16 max." % len(groups))
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
if self.password_lock and not info[1].startswith('*LOCKED*'):
cmd.append('-C')
cmd.append('yes')
elif self.password_lock is False and info[1].startswith('*LOCKED*'):
cmd.append('-C')
cmd.append('no')
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
class SunOS(User):
"""
This is a SunOS User manipulation class - The main difference between
this class and the generic user class is that Solaris-type distros
don't support the concept of a "system" account and we need to
edit the /etc/shadow file manually to set a password. (Ugh)
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
- user_info()
"""
platform = 'SunOS'
distribution = None
SHADOWFILE = '/etc/shadow'
USER_ATTR = '/etc/user_attr'
def get_password_defaults(self):
# Read password aging defaults
try:
minweeks = ''
maxweeks = ''
warnweeks = ''
with open("/etc/default/passwd", 'r') as f:
for line in f:
line = line.strip()
if (line.startswith('#') or line == ''):
continue
m = re.match(r'^([^#]*)#(.*)$', line)
if m: # The line contains a hash / comment
line = m.group(1)
key, value = line.split('=')
if key == "MINWEEKS":
minweeks = value.rstrip('\n')
elif key == "MAXWEEKS":
maxweeks = value.rstrip('\n')
elif key == "WARNWEEKS":
warnweeks = value.rstrip('\n')
except Exception as err:
self.module.fail_json(msg="failed to read /etc/default/passwd: %s" % to_native(err))
return (minweeks, maxweeks, warnweeks)
def remove_user(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user(self):
cmd = [self.module.get_bin_path('useradd', True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.umask is not None:
cmd.append('-K')
cmd.append('UMASK=' + self.umask)
if self.profile is not None:
cmd.append('-P')
cmd.append(self.profile)
if self.authorization is not None:
cmd.append('-A')
cmd.append(self.authorization)
if self.role is not None:
cmd.append('-R')
cmd.append(self.role)
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
if not self.module.check_mode:
# we have to set the password by editing the /etc/shadow file
if self.password is not None:
self.backup_shadow()
minweeks, maxweeks, warnweeks = self.get_password_defaults()
try:
lines = []
with open(self.SHADOWFILE, 'rb') as f:
for line in f:
line = to_native(line, errors='surrogate_or_strict')
fields = line.strip().split(':')
if not fields[0] == self.name:
lines.append(line)
continue
fields[1] = self.password
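# shadow field 3 is the date of the last password change, in days since the Epoch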
fields[2] = str(int(time.time() // 86400))
if minweeks:
try:
fields[3] = str(int(minweeks) * 7)
except ValueError:
# mirror solaris, which allows for any value in this field, and ignores anything that is not an int.
pass
if maxweeks:
try:
fields[4] = str(int(maxweeks) * 7)
except ValueError:
# mirror solaris, which allows for any value in this field, and ignores anything that is not an int.
pass
if warnweeks:
try:
fields[5] = str(int(warnweeks) * 7)
except ValueError:
# mirror solaris, which allows for any value in this field, and ignores anything that is not an int.
pass
line = ':'.join(fields)
lines.append('%s\n' % line)
with open(self.SHADOWFILE, 'w+') as f:
f.writelines(lines)
except Exception as err:
self.module.fail_json(msg="failed to update users password: %s" % to_native(err))
return (rc, out, err)
def modify_user_usermod(self):
cmd = [self.module.get_bin_path('usermod', True)]
cmd_len = len(cmd)
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
groups_need_mod = False
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
new_groups = groups
if self.append:
new_groups.update(current_groups)
cmd.append(','.join(new_groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.profile is not None and info[7] != self.profile:
cmd.append('-P')
cmd.append(self.profile)
if self.authorization is not None and info[8] != self.authorization:
cmd.append('-A')
cmd.append(self.authorization)
if self.role is not None and info[9] != self.role:
cmd.append('-R')
cmd.append(self.role)
# modify the user if cmd will do anything
if cmd_len != len(cmd):
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
else:
(rc, out, err) = (None, '', '')
# we have to set the password by editing the /etc/shadow file
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
self.backup_shadow()
(rc, out, err) = (0, '', '')
if not self.module.check_mode:
minweeks, maxweeks, warnweeks = self.get_password_defaults()
try:
lines = []
with open(self.SHADOWFILE, 'rb') as f:
for line in f:
line = to_native(line, errors='surrogate_or_strict')
fields = line.strip().split(':')
if not fields[0] == self.name:
lines.append(line)
continue
fields[1] = self.password
fields[2] = str(int(time.time() // 86400))
if minweeks:
fields[3] = str(int(minweeks) * 7)
if maxweeks:
fields[4] = str(int(maxweeks) * 7)
if warnweeks:
fields[5] = str(int(warnweeks) * 7)
line = ':'.join(fields)
lines.append('%s\n' % line)
with open(self.SHADOWFILE, 'w+') as f:
f.writelines(lines)
rc = 0
except Exception as err:
self.module.fail_json(msg="failed to update users password: %s" % to_native(err))
return (rc, out, err)
def user_info(self):
info = super(SunOS, self).user_info()
if info:
info += self._user_attr_info()
return info
def _user_attr_info(self):
info = [''] * 3
with open(self.USER_ATTR, 'r') as file_handler:
for line in file_handler:
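# /etc/user_attr entries look like 'name::::profiles=...;auths=...;roles=...'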
lines = line.strip().split('::::')
if lines[0] == self.name:
tmp = dict(x.split('=') for x in lines[1].split(';'))
info[0] = tmp.get('profiles', '')
info[1] = tmp.get('auths', '')
info[2] = tmp.get('roles', '')
return info
class DarwinUser(User):
"""
This is a Darwin macOS User manipulation class.
Main differences are that Darwin:-
- Handles accounts in a database managed by dscl(1)
- Has no useradd/groupadd
- Does not create home directories
- User password must be cleartext
- UID must be given
- System users must have a UID below 500
This overrides the following methods from the generic class:-
- user_exists()
- create_user()
- remove_user()
- modify_user()
"""
platform = 'Darwin'
distribution = None
SHADOWFILE = None
dscl_directory = '.'
fields = [
('comment', 'RealName'),
('home', 'NFSHomeDirectory'),
('shell', 'UserShell'),
('uid', 'UniqueID'),
('group', 'PrimaryGroupID'),
('hidden', 'IsHidden'),
]
def __init__(self, module):
super(DarwinUser, self).__init__(module)
# make the user hidden if the option is set, or defer to the system option
if self.hidden is None:
if self.system:
self.hidden = 1
elif self.hidden:
self.hidden = 1
else:
self.hidden = 0
# add hidden to processing if set
if self.hidden is not None:
self.fields.append(('hidden', 'IsHidden'))
def _get_dscl(self):
return [self.module.get_bin_path('dscl', True), self.dscl_directory]
def _list_user_groups(self):
cmd = self._get_dscl()
cmd += ['-search', '/Groups', 'GroupMembership', self.name]
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
groups = []
for line in out.splitlines():
if line.startswith(' ') or line.startswith(')'):
continue
groups.append(line.split()[0])
return groups
def _get_user_property(self, property):
'''Return user PROPERTY as given by dscl(1) -read, or None if not found.'''
cmd = self._get_dscl()
cmd += ['-read', '/Users/%s' % self.name, property]
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
if rc != 0:
return None
# from dscl(1)
# if property contains embedded spaces, the list will instead be
# displayed one entry per line, starting on the line after the key.
lines = out.splitlines()
# sys.stderr.write('*** |%s| %s -> %s\n' % (property, out, lines))
if len(lines) == 1:
return lines[0].split(': ')[1]
if len(lines) > 2:
return '\n'.join([lines[1].strip()] + lines[2:])
if len(lines) == 2:
return lines[1].strip()
return None
def _get_next_uid(self, system=None):
'''
Return the next available uid. If system=True, then
the uid should be below 500, if possible.
'''
cmd = self._get_dscl()
cmd += ['-list', '/Users', 'UniqueID']
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
if rc != 0:
self.module.fail_json(
msg="Unable to get the next available uid",
rc=rc,
out=out,
err=err
)
max_uid = 0
max_system_uid = 0
for line in out.splitlines():
current_uid = int(line.split(' ')[-1])
if max_uid < current_uid:
max_uid = current_uid
if max_system_uid < current_uid and current_uid < 500:
max_system_uid = current_uid
if system and (0 < max_system_uid < 499):
return max_system_uid + 1
return max_uid + 1
def _change_user_password(self):
'''Set the password for SELF.NAME to SELF.PASSWORD.
Note that the password must be cleartext.
'''
# some documentation on how passwords are stored on OSX:
# http://blog.lostpassword.com/2012/07/cracking-mac-os-x-lion-accounts-passwords/
# http://null-byte.wonderhowto.com/how-to/hack-mac-os-x-lion-passwords-0130036/
# http://pastebin.com/RYqxi7Ca
# on OSX 10.8+ hash is SALTED-SHA512-PBKDF2
# https://pythonhosted.org/passlib/lib/passlib.hash.pbkdf2_digest.html
# https://gist.github.com/nueh/8252572
cmd = self._get_dscl()
if self.password:
cmd += ['-passwd', '/Users/%s' % self.name, self.password]
else:
cmd += ['-create', '/Users/%s' % self.name, 'Password', '*']
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Error when changing password', err=err, out=out, rc=rc)
return (rc, out, err)
def _make_group_numerical(self):
'''Convert SELF.GROUP to its numerical GID, as a string suitable for dscl.'''
if self.group is None:
self.group = 'nogroup'
try:
self.group = grp.getgrnam(self.group).gr_gid
except KeyError:
self.module.fail_json(msg='Group "%s" not found. Try to create it first using "group" module.' % self.group)
# We need to pass a string to dscl
self.group = str(self.group)
def __modify_group(self, group, action):
'''Add SELF.NAME to GROUP or remove it, depending on ACTION.
ACTION can be 'add' or 'remove'; anything else is treated as 'remove'. '''
if action == 'add':
option = '-a'
else:
option = '-d'
cmd = ['dseditgroup', '-o', 'edit', option, self.name, '-t', 'user', group]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot %s user "%s" to group "%s".'
% (action, self.name, group), err=err, out=out, rc=rc)
return (rc, out, err)
def _modify_group(self):
'''Synchronize SELF.NAME's group memberships with SELF.GROUPS,
adding and removing memberships as needed. '''
rc = 0
out = ''
err = ''
changed = False
current = set(self._list_user_groups())
if self.groups is not None:
target = set(self.groups.split(','))
else:
target = set([])
if self.append is False:
for remove in current - target:
(_rc, _out, _err) = self.__modify_group(remove, 'remove')
rc += _rc
out += _out
err += _err
changed = True
for add in target - current:
(_rc, _out, _err) = self.__modify_group(add, 'add')
rc += _rc
out += _out
err += _err
changed = True
return (rc, out, err, changed)
def _update_system_user(self):
'''Hide or show user on the login window according to SELF.SYSTEM.
Returns 0 if a change has been made, None otherwise.'''
plist_file = '/Library/Preferences/com.apple.loginwindow.plist'
# http://support.apple.com/kb/HT5017?viewlocale=en_US
cmd = ['defaults', 'read', plist_file, 'HiddenUsersList']
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
# returned value is
# (
# "_userA",
# "_UserB",
# userc
# )
hidden_users = []
for x in out.splitlines()[1:-1]:
try:
x = x.split('"')[1]
except IndexError:
x = x.strip()
hidden_users.append(x)
if self.system:
if self.name not in hidden_users:
cmd = ['defaults', 'write', plist_file, 'HiddenUsersList', '-array-add', self.name]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot add user "%s" to hidden user list.' % self.name, err=err, out=out, rc=rc)
return 0
else:
if self.name in hidden_users:
del (hidden_users[hidden_users.index(self.name)])
cmd = ['defaults', 'write', plist_file, 'HiddenUsersList', '-array'] + hidden_users
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot remove user "%s" from hidden user list.' % self.name, err=err, out=out, rc=rc)
return 0
def user_exists(self):
'''Check if SELF.NAME is a known user on the system.'''
cmd = self._get_dscl()
cmd += ['-read', '/Users/%s' % self.name, 'UniqueID']
(rc, out, err) = self.execute_command(cmd, obey_checkmode=False)
return rc == 0
def remove_user(self):
'''Delete SELF.NAME. If SELF.FORCE is true, remove its home directory.'''
info = self.user_info()
cmd = self._get_dscl()
cmd += ['-delete', '/Users/%s' % self.name]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot delete user "%s".' % self.name, err=err, out=out, rc=rc)
if self.force:
if os.path.exists(info[5]):
shutil.rmtree(info[5])
out += "Removed %s" % info[5]
return (rc, out, err)
def create_user(self, command_name='dscl'):
cmd = self._get_dscl()
cmd += ['-create', '/Users/%s' % self.name]
(rc, out, err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot create user "%s".' % self.name, err=err, out=out, rc=rc)
self._make_group_numerical()
if self.uid is None:
self.uid = str(self._get_next_uid(self.system))
# Homedir is not created by default
if self.create_home:
if self.home is None:
self.home = '/Users/%s' % self.name
if not self.module.check_mode:
if not os.path.exists(self.home):
os.makedirs(self.home)
self.chown_homedir(int(self.uid), int(self.group), self.home)
# dscl sets shell to /usr/bin/false when UserShell is not specified
# so set the shell to /bin/bash when the user is not a system user
if not self.system and self.shell is None:
self.shell = '/bin/bash'
for field in self.fields:
if field[0] in self.__dict__ and self.__dict__[field[0]]:
cmd = self._get_dscl()
cmd += ['-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
(rc, _out, _err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(msg='Cannot add property "%s" to user "%s".' % (field[0], self.name), err=err, out=out, rc=rc)
out += _out
err += _err
if rc != 0:
return (rc, _out, _err)
(rc, _out, _err) = self._change_user_password()
out += _out
err += _err
self._update_system_user()
# here we don't care about change status since it is a creation,
# thus changed is always true.
if self.groups:
(rc, _out, _err, changed) = self._modify_group()
out += _out
err += _err
return (rc, out, err)
def modify_user(self):
changed = None
out = ''
err = ''
if self.group:
self._make_group_numerical()
for field in self.fields:
if field[0] in self.__dict__ and self.__dict__[field[0]]:
current = self._get_user_property(field[1])
if current is None or current != to_text(self.__dict__[field[0]]):
cmd = self._get_dscl()
cmd += ['-create', '/Users/%s' % self.name, field[1], self.__dict__[field[0]]]
(rc, _out, _err) = self.execute_command(cmd)
if rc != 0:
self.module.fail_json(
msg='Cannot update property "%s" for user "%s".'
% (field[0], self.name), err=err, out=out, rc=rc)
changed = rc
out += _out
err += _err
if self.update_password == 'always' and self.password is not None:
(rc, _out, _err) = self._change_user_password()
out += _out
err += _err
changed = rc
if self.groups:
(rc, _out, _err, _changed) = self._modify_group()
out += _out
err += _err
if _changed is True:
changed = rc
rc = self._update_system_user()
if rc == 0:
changed = rc
return (changed, out, err)
class AIX(User):
"""
This is an AIX User manipulation class.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
- parse_shadow_file()
"""
platform = 'AIX'
distribution = None
SHADOWFILE = '/etc/security/passwd'
def remove_user(self):
cmd = [self.module.get_bin_path('userdel', True)]
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def create_user_useradd(self, command_name='useradd'):
cmd = [self.module.get_bin_path(command_name, True)]
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.create_home:
cmd.append('-m')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.umask is not None:
cmd.append('-K')
cmd.append('UMASK=' + self.umask)
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
# set password with chpasswd
if self.password is not None:
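# chpasswd -e expects pre-encrypted 'name:hash' pairs on stdin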
cmd = []
cmd.append(self.module.get_bin_path('chpasswd', True))
cmd.append('-e')
cmd.append('-c')
self.execute_command(cmd, data="%s:%s" % (self.name, self.password))
return (rc, out, err)
def modify_user_usermod(self):
cmd = [self.module.get_bin_path('usermod', True)]
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
if self.move_home:
cmd.append('-m')
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
# skip if no changes to be made
if len(cmd) == 1:
(rc, out, err) = (None, '', '')
else:
cmd.append(self.name)
(rc, out, err) = self.execute_command(cmd)
# set password with chpasswd
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd = []
cmd.append(self.module.get_bin_path('chpasswd', True))
cmd.append('-e')
cmd.append('-c')
(rc2, out2, err2) = self.execute_command(cmd, data="%s:%s" % (self.name, self.password))
else:
(rc2, out2, err2) = (None, '', '')
if rc is not None:
return (rc, out + out2, err + err2)
else:
return (rc2, out + out2, err + err2)
def parse_shadow_file(self):
"""Example AIX shadowfile data:
nobody:
password = *
operator1:
password = {ssha512}06$xxxxxxxxxxxx....
lastupdate = 1549558094
test1:
password = *
lastupdate = 1553695126
"""
b_name = to_bytes(self.name)
b_passwd = b''
b_expires = b''
if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
with open(self.SHADOWFILE, 'rb') as bf:
b_lines = bf.readlines()
b_passwd_line = b''
b_expires_line = b''
try:
for index, b_line in enumerate(b_lines):
# Get password and lastupdate lines which come after the username
if b_line.startswith(b'%s:' % b_name):
b_passwd_line = b_lines[index + 1]
b_expires_line = b_lines[index + 2]
break
# Sanity check the lines because sometimes both are not present
if b' = ' in b_passwd_line:
b_passwd = b_passwd_line.split(b' = ', 1)[-1].strip()
if b' = ' in b_expires_line:
b_expires = b_expires_line.split(b' = ', 1)[-1].strip()
except IndexError:
self.module.fail_json(msg='Failed to parse shadow file %s' % self.SHADOWFILE)
passwd = to_native(b_passwd)
expires = to_native(b_expires) or -1
return passwd, expires
class HPUX(User):
"""
This is an HP-UX User manipulation class.
This overrides the following methods from the generic class:-
- create_user()
- remove_user()
- modify_user()
"""
platform = 'HP-UX'
distribution = None
SHADOWFILE = '/etc/shadow'
def create_user(self):
cmd = ['/usr/sam/lbin/useradd.sam']
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
cmd.append('-G')
cmd.append(','.join(groups))
if self.comment is not None:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-d')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
if self.create_home:
cmd.append('-m')
else:
cmd.append('-M')
if self.system:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def remove_user(self):
cmd = ['/usr/sam/lbin/userdel.sam']
if self.force:
cmd.append('-F')
if self.remove:
cmd.append('-r')
cmd.append(self.name)
return self.execute_command(cmd)
def modify_user(self):
cmd = ['/usr/sam/lbin/usermod.sam']
info = self.user_info()
if self.uid is not None and info[2] != int(self.uid):
cmd.append('-u')
cmd.append(self.uid)
if self.non_unique:
cmd.append('-o')
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)
ginfo = self.group_info(self.group)
if info[3] != ginfo[2]:
cmd.append('-g')
cmd.append(self.group)
if self.groups is not None:
current_groups = self.user_group_membership()
groups_need_mod = False
groups = []
if self.groups == '':
if current_groups and not self.append:
groups_need_mod = True
else:
groups = self.get_groups_set(remove_existing=False)
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
if self.append:
for g in groups:
if g in group_diff:
groups_need_mod = True
break
else:
groups_need_mod = True
if groups_need_mod:
cmd.append('-G')
new_groups = groups
if self.append:
new_groups = groups | set(current_groups)
cmd.append(','.join(new_groups))
if self.comment is not None and info[4] != self.comment:
cmd.append('-c')
cmd.append(self.comment)
if self.home is not None and info[5] != self.home:
cmd.append('-d')
cmd.append(self.home)
if self.move_home:
cmd.append('-m')
if self.shell is not None and info[6] != self.shell:
cmd.append('-s')
cmd.append(self.shell)
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-F')
cmd.append('-p')
cmd.append(self.password)
# skip if no changes to be made
if len(cmd) == 1:
return (None, '', '')
cmd.append(self.name)
return self.execute_command(cmd)
class BusyBox(User):
"""
This is the BusyBox class for use on systems that have adduser, deluser,
and delgroup commands. It overrides the following methods:
- create_user()
- remove_user()
- modify_user()
"""
def create_user(self):
cmd = [self.module.get_bin_path('adduser', True)]
cmd.append('-D')
if self.uid is not None:
cmd.append('-u')
cmd.append(self.uid)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg='Group {0} does not exist'.format(self.group))
cmd.append('-G')
cmd.append(self.group)
if self.comment is not None:
cmd.append('-g')
cmd.append(self.comment)
if self.home is not None:
cmd.append('-h')
cmd.append(self.home)
if self.shell is not None:
cmd.append('-s')
cmd.append(self.shell)
if not self.create_home:
cmd.append('-H')
if self.skeleton is not None:
cmd.append('-k')
cmd.append(self.skeleton)
if self.umask is not None:
cmd.append('-K')
cmd.append('UMASK=' + self.umask)
if self.system:
cmd.append('-S')
cmd.append(self.name)
rc, out, err = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
if self.password is not None:
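# BusyBox chpasswd reads 'name:hash' pairs on stdin; --encrypted uses the supplied hash verbatim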
cmd = [self.module.get_bin_path('chpasswd', True)]
cmd.append('--encrypted')
data = '{name}:{password}'.format(name=self.name, password=self.password)
rc, out, err = self.execute_command(cmd, data=data)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
# Add to additional groups
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
add_cmd_bin = self.module.get_bin_path('adduser', True)
for group in groups:
cmd = [add_cmd_bin, self.name, group]
rc, out, err = self.execute_command(cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
return rc, out, err
def remove_user(self):
cmd = [
self.module.get_bin_path('deluser', True),
self.name
]
if self.remove:
cmd.append('--remove-home')
return self.execute_command(cmd)
def modify_user(self):
current_groups = self.user_group_membership()
groups = []
rc = None
out = ''
err = ''
info = self.user_info()
add_cmd_bin = self.module.get_bin_path('adduser', True)
remove_cmd_bin = self.module.get_bin_path('delgroup', True)
# Manage group membership
if self.groups is not None and len(self.groups):
groups = self.get_groups_set()
group_diff = set(current_groups).symmetric_difference(groups)
if group_diff:
for g in groups:
if g in group_diff:
add_cmd = [add_cmd_bin, self.name, g]
rc, out, err = self.execute_command(add_cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
for g in group_diff:
if g not in groups and not self.append:
remove_cmd = [remove_cmd_bin, self.name, g]
rc, out, err = self.execute_command(remove_cmd)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
# Manage password
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd = [self.module.get_bin_path('chpasswd', True)]
cmd.append('--encrypted')
data = '{name}:{password}'.format(name=self.name, password=self.password)
rc, out, err = self.execute_command(cmd, data=data)
if rc is not None and rc != 0:
self.module.fail_json(name=self.name, msg=err, rc=rc)
return rc, out, err
class Alpine(BusyBox):
"""
This is the Alpine User manipulation class. It inherits the BusyBox class
behaviors such as using adduser and deluser commands.
"""
platform = 'Linux'
distribution = 'Alpine'
def main():
ssh_defaults = dict(
bits=0,
type='rsa',
passphrase=None,
comment='ansible-generated on %s' % socket.gethostname()
)
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
name=dict(type='str', required=True, aliases=['user']),
uid=dict(type='int'),
non_unique=dict(type='bool', default=False),
group=dict(type='str'),
groups=dict(type='list', elements='str'),
comment=dict(type='str'),
home=dict(type='path'),
shell=dict(type='str'),
password=dict(type='str', no_log=True),
login_class=dict(type='str'),
password_expire_max=dict(type='int', no_log=False),
password_expire_min=dict(type='int', no_log=False),
# following options are specific to macOS
hidden=dict(type='bool'),
# following options are specific to selinux
seuser=dict(type='str'),
# following options are specific to userdel
force=dict(type='bool', default=False),
remove=dict(type='bool', default=False),
# following options are specific to useradd
create_home=dict(type='bool', default=True, aliases=['createhome']),
skeleton=dict(type='str'),
system=dict(type='bool', default=False),
# following options are specific to usermod
move_home=dict(type='bool', default=False),
append=dict(type='bool', default=False),
# following are specific to ssh key generation
generate_ssh_key=dict(type='bool'),
ssh_key_bits=dict(type='int', default=ssh_defaults['bits']),
ssh_key_type=dict(type='str', default=ssh_defaults['type']),
ssh_key_file=dict(type='path'),
ssh_key_comment=dict(type='str', default=ssh_defaults['comment']),
ssh_key_passphrase=dict(type='str', no_log=True),
update_password=dict(type='str', default='always', choices=['always', 'on_create'], no_log=False),
expires=dict(type='float'),
password_lock=dict(type='bool', no_log=False),
local=dict(type='bool'),
profile=dict(type='str'),
authorization=dict(type='str'),
role=dict(type='str'),
umask=dict(type='str'),
),
supports_check_mode=True,
)
user = User(module)
user.check_password_encrypted()
module.debug('User instantiated - platform %s' % user.platform)
if user.distribution:
module.debug('User instantiated - distribution %s' % user.distribution)
rc = None
out = ''
err = ''
result = {}
result['name'] = user.name
result['state'] = user.state
if user.state == 'absent':
if user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.remove_user()
if rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
result['force'] = user.force
result['remove'] = user.remove
elif user.state == 'present':
if not user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
# Check to see if the provided home path contains parent directories
# that do not exist.
path_needs_parents = False
if user.home and user.create_home:
parent = os.path.dirname(user.home)
if not os.path.isdir(parent):
path_needs_parents = True
(rc, out, err) = user.create_user()
# If the home path had parent directories that needed to be created,
# make sure file permissions are correct in the created home directory.
if path_needs_parents:
info = user.user_info()
if info is not False:
user.chown_homedir(info[2], info[3], user.home)
if module.check_mode:
result['system'] = user.name
else:
result['system'] = user.system
result['create_home'] = user.create_home
else:
# modify user (note: this function is check mode aware)
(rc, out, err) = user.modify_user()
result['append'] = user.append
result['move_home'] = user.move_home
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if user.password is not None:
result['password'] = 'NOT_LOGGING_PASSWORD'
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
if user.user_exists() and user.state == 'present':
info = user.user_info()
if info is False:
result['msg'] = "failed to look up user name: %s" % user.name
result['failed'] = True
result['uid'] = info[2]
result['group'] = info[3]
result['comment'] = info[4]
result['home'] = info[5]
result['shell'] = info[6]
if user.groups is not None:
result['groups'] = user.groups
# handle missing homedirs
info = user.user_info()
if user.home is None:
user.home = info[5]
if not os.path.exists(user.home) and user.create_home:
if not module.check_mode:
user.create_homedir(user.home)
user.chown_homedir(info[2], info[3], user.home)
result['changed'] = True
# deal with ssh key
if user.sshkeygen:
# generate ssh key (note: this function is check mode aware)
(rc, out, err) = user.ssh_key_gen()
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if rc == 0:
result['changed'] = True
(rc, out, err) = user.ssh_key_fingerprint()
if rc == 0:
result['ssh_fingerprint'] = out.strip()
else:
result['ssh_fingerprint'] = err.strip()
result['ssh_key_file'] = user.get_ssh_key_path()
result['ssh_public_key'] = user.get_ssh_public_key()
# deal with password expire max
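    # (0 is a valid value for these options, so compare against None
    # instead of relying on truthiness)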
    if user.password_expire_max is not None:
if user.user_exists():
user.set_password_expire_max()
# deal with password expire min
    if user.password_expire_min is not None:
if user.user_exists():
user.set_password_expire_min()
module.exit_json(**result)
# import module snippets
if __name__ == '__main__':
main()
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,017 |
user module can't handle password expiration parameters correctly
|
### Summary
When using the user module to set `password_expire_min=0`, the result does not meet expectations.
I suspect the problem is caused by the following code in the user module:
```
# deal with password expire min
if user.password_expire_min:
if user.user_exists():
(rc, out, err) = user.set_password_expire_min()
```
Changing it to the following may fix the problem:
```
# deal with password expire min
if user.password_expire_min is not None:
if user.user_exists():
(rc, out, err) = user.set_password_expire_min()
```
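As a standalone illustration of the pitfall (a minimal sketch, not the module's actual code), a bare truthiness check silently drops `0`, while an explicit `None` check keeps it:
```
def buggy(password_expire_min):
    # 0 is falsy, so a legitimate value of 0 never reaches this branch
    if password_expire_min:
        return 'chage -m %d' % password_expire_min

def fixed(password_expire_min):
    # only skip the call when the option was not supplied at all
    if password_expire_min is not None:
        return 'chage -m %d' % password_expire_min

assert buggy(0) is None            # the requested value is silently ignored
assert fixed(0) == 'chage -m 0'    # the value is applied as expected
```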
### Issue Type
Bug Report
### Component Name
user
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.1]
config file = None
configured module search path = ['/home/betadmin/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /app/python3/lib/python3.8/site-packages/ansible
ansible collection location = /home/betadmin/.ansible/collections:/usr/share/ansible/collections
executable location = /app/python3/bin/ansible
python version = 3.8.5 (default, Sep 18 2020, 14:55:53) [GCC 4.8.5 20150623 (Red Hat 4.8.5-16)]
jinja version = 2.11.2
libyaml = False
```
### Configuration
```console
$ ansible-config dump --only-changed
```
### OS / Environment
RHEL 7.4
### Steps to Reproduce
- name: Edit password_expire_min
user:
name: test
password_expire_min: 0
### Expected Results
Minimum number of days between password change : 0
### Actual Results
Minimum number of days between password change : 2
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75017
|
https://github.com/ansible/ansible/pull/75390
|
5a690239411567e125ae5da5d03494cc0acf6ebf
|
dbde2c2ae3b03469abbe8f2c98b50ffedcf7975f
| 2021-06-16T02:19:51Z |
python
| 2022-01-24T19:52:38Z |
test/integration/targets/user/tasks/main.yml
|
# Test code for the user module.
# (c) 2017, James Tanner <[email protected]>
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
- name: skip broken distros
meta: end_host
when: ansible_distribution == 'Alpine'
- import_tasks: test_create_user.yml
- import_tasks: test_create_system_user.yml
- import_tasks: test_create_user_uid.yml
- import_tasks: test_create_user_password.yml
- import_tasks: test_create_user_home.yml
- import_tasks: test_remove_user.yml
- import_tasks: test_no_home_fallback.yml
- import_tasks: test_expires.yml
- import_tasks: test_expires_new_account.yml
- import_tasks: test_expires_new_account_epoch_negative.yml
- import_tasks: test_expires_min_max.yml
- import_tasks: test_shadow_backup.yml
- import_tasks: test_ssh_key_passphrase.yml
- import_tasks: test_password_lock.yml
- import_tasks: test_password_lock_new_user.yml
- import_tasks: test_local.yml
- import_tasks: test_umask.yml
when: ansible_facts.system == 'Linux'
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,017 |
user module can't handle password expiration parameters correctly
|
### Summary
When using the user module to set `password_expire_min=0`, the result does not meet expectations.
I suspect the problem is caused by the following code in the user module:
```
# deal with password expire min
if user.password_expire_min:
if user.user_exists():
(rc, out, err) = user.set_password_expire_min()
```
Changing it to the following may fix the problem:
```
# deal with password expire min
if user.password_expire_min is not None:
if user.user_exists():
(rc, out, err) = user.set_password_expire_min()
```
### Issue Type
Bug Report
### Component Name
user
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.1]
config file = None
configured module search path = ['/home/betadmin/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /app/python3/lib/python3.8/site-packages/ansible
ansible collection location = /home/betadmin/.ansible/collections:/usr/share/ansible/collections
executable location = /app/python3/bin/ansible
python version = 3.8.5 (default, Sep 18 2020, 14:55:53) [GCC 4.8.5 20150623 (Red Hat 4.8.5-16)]
jinja version = 2.11.2
libyaml = False
```
### Configuration
```console
$ ansible-config dump --only-changed
```
### OS / Environment
RHEL 7.4
### Steps to Reproduce
- name: Edit password_expire_min
user:
name: test
password_expire_min: 0
### Expected Results
Minimum number of days between password change : 0
### Actual Results
Minimum number of days between password change : 2
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75017
|
https://github.com/ansible/ansible/pull/75390
|
5a690239411567e125ae5da5d03494cc0acf6ebf
|
dbde2c2ae3b03469abbe8f2c98b50ffedcf7975f
| 2021-06-16T02:19:51Z |
python
| 2022-01-24T19:52:38Z |
test/integration/targets/user/tasks/test_expires_min_max.yml
|
# https://github.com/ansible/ansible/issues/68775
- name: Test setting maximum expiration
when: ansible_facts.os_family in ['RedHat', 'Debian', 'Suse']
block:
- name: create user
user:
name: ansibulluser
state: present
- name: add maximum expire date for password
user:
name: ansibulluser
password_expire_max: 10
register: pass_max_1_0
- name: again add maximum expire date for password
user:
name: ansibulluser
password_expire_max: 10
register: pass_max_1_1
- name: validate result for maximum expire date
assert:
that:
- pass_max_1_0 is changed
- pass_max_1_1 is not changed
- name: add minimum expire date for password
user:
name: ansibulluser
password_expire_min: 5
register: pass_min_2_0
- name: again add minimum expire date for password
user:
name: ansibulluser
password_expire_min: 5
register: pass_min_2_1
- name: validate result for minimum expire date
assert:
that:
- pass_min_2_0 is changed
- pass_min_2_1 is not changed
- name: Get shadow data for ansibulluser
getent:
database: shadow
key: ansibulluser
- name: Ensure password expiration was set properly
assert:
that:
- ansible_facts.getent_shadow['ansibulluser'][2] == '5'
- ansible_facts.getent_shadow['ansibulluser'][3] == '10'
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,049 |
A successful task redo from Ansible debugger does not reactivate previously active hosts if `any_errors_fatal=yes` and `run_once=yes`
|
### Summary
I have a playbook that drives a cluster software upgrade. Because I need to ensure that all hosts belonging to the cluster will be upgraded, I have set `any_errors_fatal=yes` at play level. In order to perform on-the-fly fixes for any issue raised during playbook execution, I have also set `debugger=on_failure` at play level.
One of the upgrade steps requires an action to be performed on a single node of the cluster, so on that particular task I defined `run_once=yes`.
However, with the combination of `any_errors_fatal=yes`, `debugger=on_failure` and `run_once=yes`, Ansible does not reactivate the hosts that were active when a task intended to run only once fails and is redone. Instead, only the host where the task executed comes back to the list of active hosts. As a consequence, if the debugger is invoked, the cluster software upgrade ends up incomplete.
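A plausible shape for a fix (a hypothetical sketch of the idea only, not the actual patch from the linked PR; names follow `lib/ansible/plugins/strategy/__init__.py`): when the debugger redoes a `run_once` task, the strategy would have to restore the saved state of every host, since a `run_once` failure under `any_errors_fatal` marked them all as failed:
```python
# Hypothetical sketch of the REDO handling in the strategy's debug closure.
if next_action.result == NextAction.REDO:
    self._tqm.clear_failed_hosts()
    if task.run_once:
        # a run_once failure marked every host failed, so every saved
        # state has to be rolled back before the task is re-queued
        for host_name, state in prev_host_states.items():
            iterator.set_state_for_host(host_name, state)
    else:
        iterator.set_state_for_host(host.name, prev_host_state)
    self._queue_task(host, task, task_vars, play_context)
```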
### Issue Type
Bug Report
### Component Name
lib/ansible/plugins/strategy/__init__.py
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.5]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/amg1127/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.9/site-packages/ansible
ansible collection location = /home/amg1127/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.9.7 (default, Aug 31 2021, 13:28:12) [GCC 11.1.0]
jinja version = 3.0.2
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
# (blank output) #
```
### OS / Environment
Arch Linux
### Steps to Reproduce
Please store the below content as `inventory.yml`:
```yaml (paste below)
---
all:
hosts:
host1:
ansible_connection: local
host2:
ansible_connection: local
```
And store the below content as `playbook.yml`:
```yaml (paste below)
---
- hosts: all
gather_facts: no
debugger: on_failed
any_errors_fatal: yes
tasks:
- name: Task 1
debug:
var: inventory_hostname
- name: Task 2
assert:
that: 'false'
run_once: yes
- name: Task 3
debug:
var: inventory_hostname
```
Run this command line: `ansible-playbook -i inventory.yml playbook.yml`
Once the Ansible debugger is invoked after the `Task 2` failure, update the `that` argument for the `assert` module so that the evaluation is always `true`. Afterwards, redo the failed task.
```
(debug)> task.args['that'] = 'true'
(debug)> r
```
### Expected Results
I expected `host2` to execute `Task 3` after the successful execution of `Task 2`.
```
PLAY [all] *************************************************************************************************************
TASK [Task 1] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
TASK [Task 2] **********************************************************************************************************
fatal: [host1]: FAILED! => {
"assertion": "false",
"changed": false,
"evaluated_to": false,
"msg": "Assertion failed"
}
[host1] TASK: Task 2 (debug)> task.args['that'] = 'true'
[host1] TASK: Task 2 (debug)> r
ok: [host1] => {
"changed": false,
"msg": "All assertions passed"
}
TASK [Task 3] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
PLAY RECAP *************************************************************************************************************
host1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
host2 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Actual Results
```console
PLAY [all] *************************************************************************************************************
TASK [Task 1] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
TASK [Task 2] **********************************************************************************************************
fatal: [host1]: FAILED! => {
"assertion": "false",
"changed": false,
"evaluated_to": false,
"msg": "Assertion failed"
}
[host1] TASK: Task 2 (debug)> task.args['that'] = 'true'
[host1] TASK: Task 2 (debug)> r
ok: [host1] => {
"changed": false,
"msg": "All assertions passed"
}
TASK [Task 3] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
PLAY RECAP *************************************************************************************************************
host1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
host2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76049
|
https://github.com/ansible/ansible/pull/76814
|
cad200406af5df3e5b5034b141578bc44b142f0d
|
29de2cccba67f1feda93f28e9056240de4e35c3c
| 2021-10-15T10:30:08Z |
python
| 2022-01-26T14:43:12Z |
changelogs/fragments/76049-task-debugger-run_once.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,049 |
A successful task redo from Ansible debugger does not reactivate previously active hosts if `any_errors_fatal=yes` and `run_once=yes`
|
### Summary
I have a playbook that drives a cluster software upgrade. Because I need to ensure that all hosts belonging to the cluster will be upgraded, I have set `any_errors_fatal=yes` at play level. In order to perform on-the-fly fixes for any issue raised during playbook execution, I have also set `debugger=on_failure` at play level.
One of the upgrade steps requires an action to be performed on a single node of the cluster, so on that particular task I defined `run_once=yes`.
However, with the combination of `any_errors_fatal=yes`, `debugger=on_failure` and `run_once=yes`, Ansible does not reactivate the hosts that were active when a task intended to run only once fails and is redone. Instead, only the host where the task executed comes back to the list of active hosts. As a consequence, if the debugger is invoked, the cluster software upgrade ends up incomplete.
### Issue Type
Bug Report
### Component Name
lib/ansible/plugins/strategy/__init__.py
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.5]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/amg1127/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.9/site-packages/ansible
ansible collection location = /home/amg1127/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.9.7 (default, Aug 31 2021, 13:28:12) [GCC 11.1.0]
jinja version = 3.0.2
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
# (blank output) #
```
### OS / Environment
Arch Linux
### Steps to Reproduce
Please store the below content as `inventory.yml`:
```yaml (paste below)
---
all:
hosts:
host1:
ansible_connection: local
host2:
ansible_connection: local
```
And store the below content as `playbook.yml`:
```yaml (paste below)
---
- hosts: all
gather_facts: no
debugger: on_failed
any_errors_fatal: yes
tasks:
- name: Task 1
debug:
var: inventory_hostname
- name: Task 2
assert:
that: 'false'
run_once: yes
- name: Task 3
debug:
var: inventory_hostname
```
Run this command line: `ansible-playbook -i inventory.yml playbook.yml`
Once the Ansible debugger is invoked after the `Task 2` failure, update the `that` argument for the `assert` module so that the evaluation is always `true`. Afterwards, redo the failed task.
```
(debug)> task.args['that'] = 'true'
(debug)> r
```
### Expected Results
I expected `host2` to execute `Task 3` after the successful execution of `Task 2`.
```
PLAY [all] *************************************************************************************************************
TASK [Task 1] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
TASK [Task 2] **********************************************************************************************************
fatal: [host1]: FAILED! => {
"assertion": "false",
"changed": false,
"evaluated_to": false,
"msg": "Assertion failed"
}
[host1] TASK: Task 2 (debug)> task.args['that'] = 'true'
[host1] TASK: Task 2 (debug)> r
ok: [host1] => {
"changed": false,
"msg": "All assertions passed"
}
TASK [Task 3] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
PLAY RECAP *************************************************************************************************************
host1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
host2 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Actual Results
```console
PLAY [all] *************************************************************************************************************
TASK [Task 1] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
TASK [Task 2] **********************************************************************************************************
fatal: [host1]: FAILED! => {
"assertion": "false",
"changed": false,
"evaluated_to": false,
"msg": "Assertion failed"
}
[host1] TASK: Task 2 (debug)> task.args['that'] = 'true'
[host1] TASK: Task 2 (debug)> r
ok: [host1] => {
"changed": false,
"msg": "All assertions passed"
}
TASK [Task 3] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
PLAY RECAP *************************************************************************************************************
host1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
host2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76049
|
https://github.com/ansible/ansible/pull/76814
|
cad200406af5df3e5b5034b141578bc44b142f0d
|
29de2cccba67f1feda93f28e9056240de4e35c3c
| 2021-10-15T10:30:08Z |
python
| 2022-01-26T14:43:12Z |
lib/ansible/plugins/strategy/__init__.py
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import cmd
import functools
import os
import pprint
import sys
import threading
import time
from collections import deque
from multiprocessing import Lock
from queue import Empty, Queue
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible import context
from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleUndefinedVariable, AnsibleParserError
from ansible.executor import action_write_locks
from ansible.executor.play_iterator import IteratingStates, FailedStates
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.task_result import TaskResult
from ansible.executor.task_queue_manager import CallbackSend
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.playbook.conditional import Conditional
from ansible.playbook.handler import Handler
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.task import Task
from ansible.playbook.task_include import TaskInclude
from ansible.plugins import loader as plugin_loader
from ansible.template import Templar
from ansible.utils.display import Display
from ansible.utils.unsafe_proxy import wrap_var
from ansible.utils.vars import combine_vars
from ansible.vars.clean import strip_internal_keys, module_response_deepcopy
display = Display()
__all__ = ['StrategyBase']
# This list can contain exact matches or start-of-string prefixes;
# it does not accept regexes
ALWAYS_DELEGATE_FACT_PREFIXES = frozenset((
'discovered_interpreter_',
))
class StrategySentinel:
pass
_sentinel = StrategySentinel()
def post_process_whens(result, task, templar):
cond = None
if task.changed_when:
cond = Conditional(loader=templar._loader)
cond.when = task.changed_when
result['changed'] = cond.evaluate_conditional(templar, templar.available_variables)
if task.failed_when:
if cond is None:
cond = Conditional(loader=templar._loader)
cond.when = task.failed_when
failed_when_result = cond.evaluate_conditional(templar, templar.available_variables)
result['failed_when_result'] = result['failed'] = failed_when_result
def results_thread_main(strategy):
while True:
try:
result = strategy._final_q.get()
if isinstance(result, StrategySentinel):
break
elif isinstance(result, CallbackSend):
for arg in result.args:
if isinstance(arg, TaskResult):
strategy.normalize_task_result(arg)
break
strategy._tqm.send_callback(result.method_name, *result.args, **result.kwargs)
elif isinstance(result, TaskResult):
strategy.normalize_task_result(result)
with strategy._results_lock:
# only handlers have the listen attr, so this must be a handler
# we split up the results into two queues here to make sure
# handler and regular result processing don't cross wires
if 'listen' in result._task_fields:
strategy._handler_results.append(result)
else:
strategy._results.append(result)
else:
display.warning('Received an invalid object (%s) in the result queue: %r' % (type(result), result))
except (IOError, EOFError):
break
        except Empty:
pass
def debug_closure(func):
"""Closure to wrap ``StrategyBase._process_pending_results`` and invoke the task debugger"""
@functools.wraps(func)
def inner(self, iterator, one_pass=False, max_passes=None, do_handlers=False):
status_to_stats_map = (
('is_failed', 'failures'),
('is_unreachable', 'dark'),
('is_changed', 'changed'),
('is_skipped', 'skipped'),
)
# We don't know the host yet, copy the previous states, for lookup after we process new results
prev_host_states = iterator._host_states.copy()
results = func(self, iterator, one_pass=one_pass, max_passes=max_passes, do_handlers=do_handlers)
_processed_results = []
for result in results:
task = result._task
host = result._host
_queued_task_args = self._queued_task_cache.pop((host.name, task._uuid), None)
task_vars = _queued_task_args['task_vars']
play_context = _queued_task_args['play_context']
# Try to grab the previous host state, if it doesn't exist use get_host_state to generate an empty state
try:
prev_host_state = prev_host_states[host.name]
except KeyError:
prev_host_state = iterator.get_host_state(host)
while result.needs_debugger(globally_enabled=self.debugger_active):
next_action = NextAction()
dbg = Debugger(task, host, task_vars, play_context, result, next_action)
dbg.cmdloop()
if next_action.result == NextAction.REDO:
# rollback host state
self._tqm.clear_failed_hosts()
iterator.set_state_for_host(host.name, prev_host_state)
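                    # NOTE: only the state of the host being debugged is
                    # restored here; if the task ran with run_once under
                    # any_errors_fatal, the other hosts were also marked
                    # failed and remain so (the behavior reported in #76049)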
for method, what in status_to_stats_map:
if getattr(result, method)():
self._tqm._stats.decrement(what, host.name)
self._tqm._stats.decrement('ok', host.name)
# redo
self._queue_task(host, task, task_vars, play_context)
_processed_results.extend(debug_closure(func)(self, iterator, one_pass))
break
elif next_action.result == NextAction.CONTINUE:
_processed_results.append(result)
break
elif next_action.result == NextAction.EXIT:
# Matches KeyboardInterrupt from bin/ansible
sys.exit(99)
else:
_processed_results.append(result)
return _processed_results
return inner
class StrategyBase:
'''
This is the base class for strategy plugins, which contains some common
code useful to all strategies like running handlers, cleanup actions, etc.
'''
# by default, strategies should support throttling but we allow individual
# strategies to disable this and either forego supporting it or managing
# the throttling internally (as `free` does)
ALLOW_BASE_THROTTLING = True
def __init__(self, tqm):
self._tqm = tqm
self._inventory = tqm.get_inventory()
self._workers = tqm._workers
self._variable_manager = tqm.get_variable_manager()
self._loader = tqm.get_loader()
self._final_q = tqm._final_q
self._step = context.CLIARGS.get('step', False)
self._diff = context.CLIARGS.get('diff', False)
# the task cache is a dictionary of tuples of (host.name, task._uuid)
# used to find the original task object of in-flight tasks and to store
# the task args/vars and play context info used to queue the task.
self._queued_task_cache = {}
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
# internal counters
self._pending_results = 0
self._pending_handler_results = 0
self._cur_worker = 0
# this dictionary is used to keep track of hosts that have
# outstanding tasks still in queue
self._blocked_hosts = dict()
# this dictionary is used to keep track of hosts that have
# flushed handlers
self._flushed_hosts = dict()
self._results = deque()
self._handler_results = deque()
self._results_lock = threading.Condition(threading.Lock())
# create the result processing thread for reading results in the background
self._results_thread = threading.Thread(target=results_thread_main, args=(self,))
self._results_thread.daemon = True
self._results_thread.start()
# holds the list of active (persistent) connections to be shutdown at
# play completion
self._active_connections = dict()
# Caches for get_host calls, to avoid calling excessively
# These values should be set at the top of the ``run`` method of each
# strategy plugin. Use ``_set_hosts_cache`` to set these values
self._hosts_cache = []
self._hosts_cache_all = []
self.debugger_active = C.ENABLE_TASK_DEBUGGER
def _set_hosts_cache(self, play, refresh=True):
"""Responsible for setting _hosts_cache and _hosts_cache_all
See comment in ``__init__`` for the purpose of these caches
"""
if not refresh and all((self._hosts_cache, self._hosts_cache_all)):
return
if not play.finalized and Templar(None).is_template(play.hosts):
_pattern = 'all'
else:
_pattern = play.hosts or 'all'
self._hosts_cache_all = [h.name for h in self._inventory.get_hosts(pattern=_pattern, ignore_restrictions=True)]
self._hosts_cache = [h.name for h in self._inventory.get_hosts(play.hosts, order=play.order)]
def cleanup(self):
# close active persistent connections
for sock in self._active_connections.values():
try:
conn = Connection(sock)
conn.reset()
except ConnectionError as e:
# most likely socket is already closed
display.debug("got an error while closing persistent connection: %s" % e)
self._final_q.put(_sentinel)
self._results_thread.join()
def run(self, iterator, play_context, result=0):
# execute one more pass through the iterator without peeking, to
# make sure that all of the hosts are advanced to their final task.
# This should be safe, as everything should be IteratingStates.COMPLETE by
# this point, though the strategy may not advance the hosts itself.
for host in self._hosts_cache:
if host not in self._tqm._unreachable_hosts:
try:
iterator.get_next_task_for_host(self._inventory.hosts[host])
except KeyError:
iterator.get_next_task_for_host(self._inventory.get_host(host))
# save the failed/unreachable hosts, as the run_handlers()
# method will clear that information during its execution
failed_hosts = iterator.get_failed_hosts()
unreachable_hosts = self._tqm._unreachable_hosts.keys()
display.debug("running handlers")
handler_result = self.run_handlers(iterator, play_context)
if isinstance(handler_result, bool) and not handler_result:
result |= self._tqm.RUN_ERROR
elif not handler_result:
result |= handler_result
# now update with the hosts (if any) that failed or were
# unreachable during the handler execution phase
failed_hosts = set(failed_hosts).union(iterator.get_failed_hosts())
unreachable_hosts = set(unreachable_hosts).union(self._tqm._unreachable_hosts.keys())
        # return the appropriate code, depending on the status of the hosts after the run
if not isinstance(result, bool) and result != self._tqm.RUN_OK:
return result
elif len(unreachable_hosts) > 0:
return self._tqm.RUN_UNREACHABLE_HOSTS
elif len(failed_hosts) > 0:
return self._tqm.RUN_FAILED_HOSTS
else:
return self._tqm.RUN_OK
def get_hosts_remaining(self, play):
self._set_hosts_cache(play, refresh=False)
ignore = set(self._tqm._failed_hosts).union(self._tqm._unreachable_hosts)
return [host for host in self._hosts_cache if host not in ignore]
def get_failed_hosts(self, play):
self._set_hosts_cache(play, refresh=False)
return [host for host in self._hosts_cache if host in self._tqm._failed_hosts]
def add_tqm_variables(self, vars, play):
'''
Base class method to add extra variables/information to the list of task
vars sent through the executor engine regarding the task queue manager state.
'''
vars['ansible_current_hosts'] = self.get_hosts_remaining(play)
vars['ansible_failed_hosts'] = self.get_failed_hosts(play)
def _queue_task(self, host, task, task_vars, play_context):
''' handles queueing the task up to be sent to a worker '''
display.debug("entering _queue_task() for %s/%s" % (host.name, task.action))
# Add a write lock for tasks.
# Maybe this should be added somewhere further up the call stack but
# this is the earliest in the code where we have task (1) extracted
# into its own variable and (2) there's only a single code path
# leading to the module being run. This is called by three
# functions: __init__.py::_do_handler_run(), linear.py::run(), and
# free.py::run() so we'd have to add to all three to do it there.
# The next common higher level is __init__.py::run() and that has
# tasks inside of play_iterator so we'd have to extract them to do it
# there.
if task.action not in action_write_locks.action_write_locks:
display.debug('Creating lock for %s' % task.action)
action_write_locks.action_write_locks[task.action] = Lock()
# create a templar and template things we need later for the queuing process
templar = Templar(loader=self._loader, variables=task_vars)
try:
throttle = int(templar.template(task.throttle))
except Exception as e:
raise AnsibleError("Failed to convert the throttle value to an integer.", obj=task._ds, orig_exc=e)
# and then queue the new task
try:
# Determine the "rewind point" of the worker list. This means we start
# iterating over the list of workers until the end of the list is found.
# Normally, that is simply the length of the workers list (as determined
# by the forks or serial setting), however a task/block/play may "throttle"
# that limit down.
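            # For example, with forks=5 and a task throttle of 2, the cursor
            # wraps at index 2, so only the first two worker slots are reused.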
rewind_point = len(self._workers)
if throttle > 0 and self.ALLOW_BASE_THROTTLING:
if task.run_once:
display.debug("Ignoring 'throttle' as 'run_once' is also set for '%s'" % task.get_name())
else:
if throttle <= rewind_point:
display.debug("task: %s, throttle: %d" % (task.get_name(), throttle))
rewind_point = throttle
queued = False
starting_worker = self._cur_worker
while True:
if self._cur_worker >= rewind_point:
self._cur_worker = 0
worker_prc = self._workers[self._cur_worker]
if worker_prc is None or not worker_prc.is_alive():
self._queued_task_cache[(host.name, task._uuid)] = {
'host': host,
'task': task,
'task_vars': task_vars,
'play_context': play_context
}
worker_prc = WorkerProcess(self._final_q, task_vars, host, task, play_context, self._loader, self._variable_manager, plugin_loader)
self._workers[self._cur_worker] = worker_prc
self._tqm.send_callback('v2_runner_on_start', host, task)
worker_prc.start()
display.debug("worker is %d (out of %d available)" % (self._cur_worker + 1, len(self._workers)))
queued = True
self._cur_worker += 1
if self._cur_worker >= rewind_point:
self._cur_worker = 0
if queued:
break
elif self._cur_worker == starting_worker:
time.sleep(0.0001)
if isinstance(task, Handler):
self._pending_handler_results += 1
else:
self._pending_results += 1
except (EOFError, IOError, AssertionError) as e:
# most likely an abort
display.debug("got an error while queuing: %s" % e)
return
display.debug("exiting _queue_task() for %s/%s" % (host.name, task.action))
def get_task_hosts(self, iterator, task_host, task):
if task.run_once:
host_list = [host for host in self._hosts_cache if host not in self._tqm._unreachable_hosts]
else:
host_list = [task_host.name]
return host_list
def get_delegated_hosts(self, result, task):
host_name = result.get('_ansible_delegated_vars', {}).get('ansible_delegated_host', None)
return [host_name or task.delegate_to]
def _set_always_delegated_facts(self, result, task):
"""Sets host facts for ``delegate_to`` hosts for facts that should
always be delegated
This operation mutates ``result`` to remove the always delegated facts
See ``ALWAYS_DELEGATE_FACT_PREFIXES``
"""
if task.delegate_to is None:
return
facts = result['ansible_facts']
always_keys = set()
_add = always_keys.add
for fact_key in facts:
for always_key in ALWAYS_DELEGATE_FACT_PREFIXES:
if fact_key.startswith(always_key):
_add(fact_key)
if always_keys:
_pop = facts.pop
always_facts = {
'ansible_facts': dict((k, _pop(k)) for k in list(facts) if k in always_keys)
}
host_list = self.get_delegated_hosts(result, task)
_set_host_facts = self._variable_manager.set_host_facts
for target_host in host_list:
_set_host_facts(target_host, always_facts)
def normalize_task_result(self, task_result):
"""Normalize a TaskResult to reference actual Host and Task objects
when only given the ``Host.name``, or the ``Task._uuid``
Only the ``Host.name`` and ``Task._uuid`` are commonly sent back from
the ``TaskExecutor`` or ``WorkerProcess`` due to performance concerns
Mutates the original object
"""
if isinstance(task_result._host, string_types):
# If the value is a string, it is ``Host.name``
task_result._host = self._inventory.get_host(to_text(task_result._host))
if isinstance(task_result._task, string_types):
# If the value is a string, it is ``Task._uuid``
queue_cache_entry = (task_result._host.name, task_result._task)
try:
found_task = self._queued_task_cache[queue_cache_entry]['task']
except KeyError:
# This should only happen due to an implicit task created by the
# TaskExecutor, restrict this behavior to the explicit use case
# of an implicit async_status task
if task_result._task_fields.get('action') != 'async_status':
raise
original_task = Task()
else:
original_task = found_task.copy(exclude_parent=True, exclude_tasks=True)
original_task._parent = found_task._parent
original_task.from_attrs(task_result._task_fields)
task_result._task = original_task
return task_result
@debug_closure
def _process_pending_results(self, iterator, one_pass=False, max_passes=None, do_handlers=False):
'''
Reads results off the final queue and takes appropriate action
based on the result (executing callbacks, updating state, etc.).
'''
ret_results = []
handler_templar = Templar(self._loader)
def search_handler_blocks_by_name(handler_name, handler_blocks):
# iterate in reversed order since last handler loaded with the same name wins
for handler_block in reversed(handler_blocks):
for handler_task in handler_block.block:
if handler_task.name:
try:
if not handler_task.cached_name:
if handler_templar.is_template(handler_task.name):
handler_templar.available_variables = self._variable_manager.get_vars(play=iterator._play,
task=handler_task,
_hosts=self._hosts_cache,
_hosts_all=self._hosts_cache_all)
handler_task.name = handler_templar.template(handler_task.name)
handler_task.cached_name = True
# first we check with the full result of get_name(), which may
# include the role name (if the handler is from a role). If that
# is not found, we resort to the simple name field, which doesn't
# have anything extra added to it.
candidates = (
handler_task.name,
handler_task.get_name(include_role_fqcn=False),
handler_task.get_name(include_role_fqcn=True),
)
if handler_name in candidates:
return handler_task
except (UndefinedError, AnsibleUndefinedVariable) as e:
# We skip this handler due to the fact that it may be using
# a variable in the name that was conditionally included via
# set_fact or some other method, and we don't want to error
# out unnecessarily
if not handler_task.listen:
display.warning(
"Handler '%s' is unusable because it has no listen topics and "
"the name could not be templated (host-specific variables are "
"not supported in handler names). The error: %s" % (handler_task.name, to_text(e))
)
continue
return None
cur_pass = 0
while True:
try:
self._results_lock.acquire()
if do_handlers:
task_result = self._handler_results.popleft()
else:
task_result = self._results.popleft()
except IndexError:
break
finally:
self._results_lock.release()
original_host = task_result._host
original_task = task_result._task
# all host status messages contain 2 entries: (msg, task_result)
role_ran = False
if task_result.is_failed():
role_ran = True
ignore_errors = original_task.ignore_errors
if not ignore_errors:
display.debug("marking %s as failed" % original_host.name)
if original_task.run_once:
# if we're using run_once, we have to fail every host here
for h in self._inventory.get_hosts(iterator._play.hosts):
if h.name not in self._tqm._unreachable_hosts:
iterator.mark_host_failed(h)
else:
iterator.mark_host_failed(original_host)
# grab the current state and if we're iterating on the rescue portion
# of a block then we save the failed task in a special var for use
# within the rescue/always
state, _ = iterator.get_next_task_for_host(original_host, peek=True)
if iterator.is_failed(original_host) and state and state.run_state == IteratingStates.COMPLETE:
self._tqm._failed_hosts[original_host.name] = True
# Use of get_active_state() here helps detect proper state if, say, we are in a rescue
# block from an included file (include_tasks). In a non-included rescue case, a rescue
# that starts with a new 'block' will have an active state of IteratingStates.TASKS, so we also
# check the current state block tree to see if any blocks are rescuing.
if state and (iterator.get_active_state(state).run_state == IteratingStates.RESCUE or
iterator.is_any_block_rescuing(state)):
self._tqm._stats.increment('rescued', original_host.name)
self._variable_manager.set_nonpersistent_facts(
original_host.name,
dict(
ansible_failed_task=wrap_var(original_task.serialize()),
ansible_failed_result=task_result._result,
),
)
else:
self._tqm._stats.increment('failures', original_host.name)
else:
self._tqm._stats.increment('ok', original_host.name)
self._tqm._stats.increment('ignored', original_host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', original_host.name)
self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=ignore_errors)
elif task_result.is_unreachable():
ignore_unreachable = original_task.ignore_unreachable
if not ignore_unreachable:
self._tqm._unreachable_hosts[original_host.name] = True
iterator._play._removed_hosts.append(original_host.name)
else:
self._tqm._stats.increment('skipped', original_host.name)
task_result._result['skip_reason'] = 'Host %s is unreachable' % original_host.name
self._tqm._stats.increment('dark', original_host.name)
self._tqm.send_callback('v2_runner_on_unreachable', task_result)
elif task_result.is_skipped():
self._tqm._stats.increment('skipped', original_host.name)
self._tqm.send_callback('v2_runner_on_skipped', task_result)
else:
role_ran = True
if original_task.loop:
# this task had a loop, and has more than one result, so
# loop over all of them instead of a single result
result_items = task_result._result.get('results', [])
else:
result_items = [task_result._result]
for result_item in result_items:
if '_ansible_notify' in result_item:
if task_result.is_changed():
# The shared dictionary for notified handlers is a proxy, which
# does not detect when sub-objects within the proxy are modified.
# So, per the docs, we reassign the list so the proxy picks up and
# notifies all other threads
for handler_name in result_item['_ansible_notify']:
found = False
# Find the handler using the above helper. First we look up the
# dependency chain of the current task (if it's from a role), otherwise
# we just look through the list of handlers in the current play/all
# roles and use the first one that matches the notify name
target_handler = search_handler_blocks_by_name(handler_name, iterator._play.handlers)
if target_handler is not None:
found = True
if target_handler.notify_host(original_host):
self._tqm.send_callback('v2_playbook_on_notify', target_handler, original_host)
for listening_handler_block in iterator._play.handlers:
for listening_handler in listening_handler_block.block:
listeners = getattr(listening_handler, 'listen', []) or []
if not listeners:
continue
listeners = listening_handler.get_validated_value(
'listen', listening_handler._valid_attrs['listen'], listeners, handler_templar
)
if handler_name not in listeners:
continue
else:
found = True
if listening_handler.notify_host(original_host):
self._tqm.send_callback('v2_playbook_on_notify', listening_handler, original_host)
# and if none were found, then we raise an error
if not found:
msg = ("The requested handler '%s' was not found in either the main handlers list nor in the listening "
"handlers list" % handler_name)
if C.ERROR_ON_MISSING_HANDLER:
raise AnsibleError(msg)
else:
display.warning(msg)
if 'add_host' in result_item:
# this task added a new host (add_host module)
new_host_info = result_item.get('add_host', dict())
self._add_host(new_host_info, result_item)
post_process_whens(result_item, original_task, handler_templar)
elif 'add_group' in result_item:
# this task added a new group (group_by module)
self._add_group(original_host, result_item)
post_process_whens(result_item, original_task, handler_templar)
if 'ansible_facts' in result_item and original_task.action not in C._ACTION_DEBUG:
# if delegated fact and we are delegating facts, we need to change target host for them
if original_task.delegate_to is not None and original_task.delegate_facts:
host_list = self.get_delegated_hosts(result_item, original_task)
else:
# Set facts that should always be on the delegated hosts
self._set_always_delegated_facts(result_item, original_task)
host_list = self.get_task_hosts(iterator, original_host, original_task)
if original_task.action in C._ACTION_INCLUDE_VARS:
for (var_name, var_value) in result_item['ansible_facts'].items():
                                # find the host we're actually referring to here, which may
# be a host that is not really in inventory at all
for target_host in host_list:
self._variable_manager.set_host_variable(target_host, var_name, var_value)
else:
cacheable = result_item.pop('_ansible_facts_cacheable', False)
for target_host in host_list:
# so set_fact is a misnomer but 'cacheable = true' was meant to create an 'actual fact'
# to avoid issues with precedence and confusion with set_fact normal operation,
# we set BOTH fact and nonpersistent_facts (aka hostvar)
# when fact is retrieved from cache in subsequent operations it will have the lower precedence,
# but for playbook setting it the 'higher' precedence is kept
is_set_fact = original_task.action in C._ACTION_SET_FACT
if not is_set_fact or cacheable:
self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())
if is_set_fact:
self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())
if 'ansible_stats' in result_item and 'data' in result_item['ansible_stats'] and result_item['ansible_stats']['data']:
if 'per_host' not in result_item['ansible_stats'] or result_item['ansible_stats']['per_host']:
host_list = self.get_task_hosts(iterator, original_host, original_task)
else:
host_list = [None]
data = result_item['ansible_stats']['data']
aggregate = 'aggregate' in result_item['ansible_stats'] and result_item['ansible_stats']['aggregate']
for myhost in host_list:
for k in data.keys():
if aggregate:
self._tqm._stats.update_custom_stats(k, data[k], myhost)
else:
self._tqm._stats.set_custom_stats(k, data[k], myhost)
if 'diff' in task_result._result:
if self._diff or getattr(original_task, 'diff', False):
self._tqm.send_callback('v2_on_file_diff', task_result)
if not isinstance(original_task, TaskInclude):
self._tqm._stats.increment('ok', original_host.name)
if 'changed' in task_result._result and task_result._result['changed']:
self._tqm._stats.increment('changed', original_host.name)
# finally, send the ok for this task
self._tqm.send_callback('v2_runner_on_ok', task_result)
# register final results
if original_task.register:
host_list = self.get_task_hosts(iterator, original_host, original_task)
clean_copy = strip_internal_keys(module_response_deepcopy(task_result._result))
if 'invocation' in clean_copy:
del clean_copy['invocation']
for target_host in host_list:
self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})
if do_handlers:
self._pending_handler_results -= 1
else:
self._pending_results -= 1
if original_host.name in self._blocked_hosts:
del self._blocked_hosts[original_host.name]
# If this is a role task, mark the parent role as being run (if
# the task was ok or failed, but not skipped or unreachable)
if original_task._role is not None and role_ran: # TODO: and original_task.action not in C._ACTION_INCLUDE_ROLE:?
# lookup the role in the ROLE_CACHE to make sure we're dealing
# with the correct object and mark it as executed
for (entry, role_obj) in iterator._play.ROLE_CACHE[original_task._role.get_name()].items():
if role_obj._uuid == original_task._role._uuid:
role_obj._had_task_run[original_host.name] = True
ret_results.append(task_result)
if one_pass or max_passes is not None and (cur_pass + 1) >= max_passes:
break
cur_pass += 1
return ret_results
def _wait_on_handler_results(self, iterator, handler, notified_hosts):
'''
Wait for the handler tasks to complete, using a short sleep
between checks to ensure we don't spin lock
'''
ret_results = []
handler_results = 0
display.debug("waiting for handler results...")
while (self._pending_handler_results > 0 and
handler_results < len(notified_hosts) and
not self._tqm._terminated):
if self._tqm.has_dead_workers():
raise AnsibleError("A worker was found in a dead state")
results = self._process_pending_results(iterator, do_handlers=True)
ret_results.extend(results)
handler_results += len([
r._host for r in results if r._host in notified_hosts and
r.task_name == handler.name])
if self._pending_handler_results > 0:
time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
display.debug("no more pending handlers, returning what we have")
return ret_results
def _wait_on_pending_results(self, iterator):
'''
Wait for the shared counter to drop to zero, using a short sleep
between checks to ensure we don't spin lock
'''
ret_results = []
display.debug("waiting for pending results...")
while self._pending_results > 0 and not self._tqm._terminated:
if self._tqm.has_dead_workers():
raise AnsibleError("A worker was found in a dead state")
results = self._process_pending_results(iterator)
ret_results.extend(results)
if self._pending_results > 0:
time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
display.debug("no more pending results, returning what we have")
return ret_results
def _add_host(self, host_info, result_item):
'''
Helper function to add a new host to inventory based on a task result.
'''
changed = False
if host_info:
host_name = host_info.get('host_name')
# Check if host in inventory, add if not
if host_name not in self._inventory.hosts:
self._inventory.add_host(host_name, 'all')
self._hosts_cache_all.append(host_name)
changed = True
new_host = self._inventory.hosts.get(host_name)
# Set/update the vars for this host
new_host_vars = new_host.get_vars()
new_host_combined_vars = combine_vars(new_host_vars, host_info.get('host_vars', dict()))
if new_host_vars != new_host_combined_vars:
new_host.vars = new_host_combined_vars
changed = True
new_groups = host_info.get('groups', [])
for group_name in new_groups:
if group_name not in self._inventory.groups:
group_name = self._inventory.add_group(group_name)
changed = True
new_group = self._inventory.groups[group_name]
if new_group.add_host(self._inventory.hosts[host_name]):
changed = True
# reconcile inventory, ensures inventory rules are followed
if changed:
self._inventory.reconcile_inventory()
result_item['changed'] = changed
def _add_group(self, host, result_item):
'''
Helper function to add a group (if it does not exist), and to assign the
specified host to that group.
'''
changed = False
# the host here is from the executor side, which means it was a
# serialized/cloned copy and we'll need to look up the proper
# host object from the master inventory
real_host = self._inventory.hosts.get(host.name)
if real_host is None:
if host.name == self._inventory.localhost.name:
real_host = self._inventory.localhost
else:
raise AnsibleError('%s cannot be matched in inventory' % host.name)
group_name = result_item.get('add_group')
parent_group_names = result_item.get('parent_groups', [])
if group_name not in self._inventory.groups:
group_name = self._inventory.add_group(group_name)
for name in parent_group_names:
if name not in self._inventory.groups:
# create the new group and add it to inventory
self._inventory.add_group(name)
changed = True
group = self._inventory.groups[group_name]
for parent_group_name in parent_group_names:
parent_group = self._inventory.groups[parent_group_name]
new = parent_group.add_child_group(group)
if new and not changed:
changed = True
if real_host not in group.get_hosts():
changed = group.add_host(real_host)
if group not in real_host.get_groups():
changed = real_host.add_group(group)
if changed:
self._inventory.reconcile_inventory()
result_item['changed'] = changed
def _copy_included_file(self, included_file):
'''
A proven safe and performant way to create a copy of an included file
'''
ti_copy = included_file._task.copy(exclude_parent=True)
ti_copy._parent = included_file._task._parent
temp_vars = ti_copy.vars.copy()
temp_vars.update(included_file._vars)
ti_copy.vars = temp_vars
return ti_copy
def _load_included_file(self, included_file, iterator, is_handler=False):
'''
Loads an included YAML file of tasks, applying the optional set of variables.
'''
display.debug("loading included file: %s" % included_file._filename)
try:
data = self._loader.load_from_file(included_file._filename)
if data is None:
return []
elif not isinstance(data, list):
raise AnsibleError("included task files must contain a list of tasks")
ti_copy = self._copy_included_file(included_file)
block_list = load_list_of_blocks(
data,
play=iterator._play,
parent_block=ti_copy.build_parent_block(),
role=included_file._task._role,
use_handlers=is_handler,
loader=self._loader,
variable_manager=self._variable_manager,
)
# since we skip incrementing the stats when the task result is
# first processed, we do so now for each host in the list
for host in included_file._hosts:
self._tqm._stats.increment('ok', host.name)
except AnsibleParserError:
raise
except AnsibleError as e:
if isinstance(e, AnsibleFileNotFound):
reason = "Could not find or access '%s' on the Ansible Controller." % to_text(e.file_name)
else:
reason = to_text(e)
# mark all of the hosts including this file as failed, send callbacks,
# and increment the stats for this host
for host in included_file._hosts:
tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=reason))
iterator.mark_host_failed(host)
self._tqm._failed_hosts[host.name] = True
self._tqm._stats.increment('failures', host.name)
self._tqm.send_callback('v2_runner_on_failed', tr)
return []
# finally, send the callback and return the list of blocks loaded
self._tqm.send_callback('v2_playbook_on_include', included_file)
display.debug("done processing included file")
return block_list
def run_handlers(self, iterator, play_context):
'''
Runs handlers on those hosts which have been notified.
'''
result = self._tqm.RUN_OK
for handler_block in iterator._play.handlers:
# FIXME: handlers need to support the rescue/always portions of blocks too,
# but this may take some work in the iterator and gets tricky when
# we consider the ability of meta tasks to flush handlers
for handler in handler_block.block:
if handler.notified_hosts:
result = self._do_handler_run(handler, handler.get_name(), iterator=iterator, play_context=play_context)
if not result:
break
return result
def _do_handler_run(self, handler, handler_name, iterator, play_context, notified_hosts=None):
# FIXME: need to use iterator.get_failed_hosts() instead?
# if not len(self.get_hosts_remaining(iterator._play)):
# self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
# result = False
# break
if notified_hosts is None:
notified_hosts = handler.notified_hosts[:]
# strategy plugins that filter hosts need access to the iterator to identify failed hosts
failed_hosts = self._filter_notified_failed_hosts(iterator, notified_hosts)
notified_hosts = self._filter_notified_hosts(notified_hosts)
notified_hosts += failed_hosts
if len(notified_hosts) > 0:
self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
bypass_host_loop = False
try:
action = plugin_loader.action_loader.get(handler.action, class_only=True, collection_list=handler.collections)
if getattr(action, 'BYPASS_HOST_LOOP', False):
bypass_host_loop = True
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
pass
host_results = []
for host in notified_hosts:
if not iterator.is_failed(host) or iterator._play.force_handlers:
task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=handler,
_hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
self.add_tqm_variables(task_vars, play=iterator._play)
templar = Templar(loader=self._loader, variables=task_vars)
if not handler.cached_name:
handler.name = templar.template(handler.name)
handler.cached_name = True
self._queue_task(host, handler, task_vars, play_context)
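                    # handlers honor run_once: stop queuing after the first notified host
                    # when run_once is set or the action bypasses the host loop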
if templar.template(handler.run_once) or bypass_host_loop:
break
# collect the results from the handler run
host_results = self._wait_on_handler_results(iterator, handler, notified_hosts)
included_files = IncludedFile.process_include_results(
host_results,
iterator=iterator,
loader=self._loader,
variable_manager=self._variable_manager
)
result = True
if len(included_files) > 0:
for included_file in included_files:
try:
new_blocks = self._load_included_file(included_file, iterator=iterator, is_handler=True)
# for every task in each block brought in by the include, add the list
# of hosts which included the file to the notified_handlers dict
for block in new_blocks:
iterator._play.handlers.append(block)
for task in block.block:
task_name = task.get_name()
display.debug("adding task '%s' included in handler '%s'" % (task_name, handler_name))
task.notified_hosts = included_file._hosts[:]
result = self._do_handler_run(
handler=task,
handler_name=task_name,
iterator=iterator,
play_context=play_context,
notified_hosts=included_file._hosts[:],
)
if not result:
break
except AnsibleParserError:
raise
except AnsibleError as e:
for host in included_file._hosts:
iterator.mark_host_failed(host)
self._tqm._failed_hosts[host.name] = True
display.warning(to_text(e))
continue
# remove hosts from notification list
handler.notified_hosts = [
h for h in handler.notified_hosts
if h not in notified_hosts]
display.debug("done running handlers, result is: %s" % result)
return result
def _filter_notified_failed_hosts(self, iterator, notified_hosts):
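        # the base strategy re-adds no failed hosts; strategy plugins may
        # override this hook to requeue failed hosts for handler runs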
return []
def _filter_notified_hosts(self, notified_hosts):
'''
Filter notified hosts accordingly to strategy
'''
# As main strategy is linear, we do not filter hosts
# We return a copy to avoid race conditions
return notified_hosts[:]
def _take_step(self, task, host=None):
ret = False
msg = u'Perform task: %s ' % task
if host:
msg += u'on %s ' % host
msg += u'(N)o/(y)es/(c)ontinue: '
resp = display.prompt(msg)
if resp.lower() in ['y', 'yes']:
display.debug("User ran task")
ret = True
elif resp.lower() in ['c', 'continue']:
display.debug("User ran task and canceled step mode")
self._step = False
ret = True
else:
display.debug("User skipped task")
display.banner(msg)
return ret
def _cond_not_supported_warn(self, task_name):
display.warning("%s task does not support when conditional" % task_name)
def _execute_meta(self, task, play_context, iterator, target_host):
# meta tasks store their args in the _raw_params field of args,
# since they do not use k=v pairs, so get that
meta_action = task.args.get('_raw_params')
def _evaluate_conditional(h):
all_vars = self._variable_manager.get_vars(play=iterator._play, host=h, task=task,
_hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
templar = Templar(loader=self._loader, variables=all_vars)
return task.evaluate_conditional(templar, all_vars)
skipped = False
msg = ''
skip_reason = '%s conditional evaluated to False' % meta_action
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
# These don't support "when" conditionals
if meta_action in ('noop', 'flush_handlers', 'refresh_inventory', 'reset_connection') and task.when:
self._cond_not_supported_warn(meta_action)
if meta_action == 'noop':
msg = "noop"
elif meta_action == 'flush_handlers':
self._flushed_hosts[target_host] = True
self.run_handlers(iterator, play_context)
self._flushed_hosts[target_host] = False
msg = "ran handlers"
elif meta_action == 'refresh_inventory':
self._inventory.refresh_inventory()
self._set_hosts_cache(iterator._play)
msg = "inventory successfully refreshed"
elif meta_action == 'clear_facts':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
hostname = host.get_name()
self._variable_manager.clear_facts(hostname)
msg = "facts cleared"
else:
skipped = True
skip_reason += ', not clearing facts and fact cache for %s' % target_host.name
elif meta_action == 'clear_host_errors':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
self._tqm._failed_hosts.pop(host.name, False)
self._tqm._unreachable_hosts.pop(host.name, False)
iterator.set_fail_state_for_host(host.name, FailedStates.NONE)
msg = "cleared host errors"
else:
skipped = True
skip_reason += ', not clearing host error state for %s' % target_host.name
elif meta_action == 'end_batch':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
if host.name not in self._tqm._unreachable_hosts:
iterator.set_run_state_for_host(host.name, IteratingStates.COMPLETE)
msg = "ending batch"
else:
skipped = True
skip_reason += ', continuing current batch'
elif meta_action == 'end_play':
if _evaluate_conditional(target_host):
for host in self._inventory.get_hosts(iterator._play.hosts):
if host.name not in self._tqm._unreachable_hosts:
iterator.set_run_state_for_host(host.name, IteratingStates.COMPLETE)
# end_play is used in PlaybookExecutor/TQM to indicate that
# the whole play is supposed to be ended as opposed to just a batch
iterator.end_play = True
msg = "ending play"
else:
skipped = True
skip_reason += ', continuing play'
elif meta_action == 'end_host':
if _evaluate_conditional(target_host):
iterator.set_run_state_for_host(target_host.name, IteratingStates.COMPLETE)
iterator._play._removed_hosts.append(target_host.name)
msg = "ending play for %s" % target_host.name
else:
skipped = True
skip_reason += ", continuing execution for %s" % target_host.name
# TODO: Nix msg here? Left for historical reasons, but skip_reason exists now.
msg = "end_host conditional evaluated to false, continuing execution for %s" % target_host.name
elif meta_action == 'role_complete':
# Allow users to use this in a play as reported in https://github.com/ansible/ansible/issues/22286?
# How would this work with allow_duplicates??
if task.implicit:
if target_host.name in task._role._had_task_run:
task._role._completed[target_host.name] = True
msg = 'role_complete for %s' % target_host.name
elif meta_action == 'reset_connection':
all_vars = self._variable_manager.get_vars(play=iterator._play, host=target_host, task=task,
_hosts=self._hosts_cache, _hosts_all=self._hosts_cache_all)
templar = Templar(loader=self._loader, variables=all_vars)
# apply the given task's information to the connection info,
# which may override some fields already set by the play or
# the options specified on the command line
play_context = play_context.set_task_and_variable_override(task=task, variables=all_vars, templar=templar)
# fields set from the play/task may be based on variables, so we have to
# do the same kind of post validation step on it here before we use it.
play_context.post_validate(templar=templar)
# now that the play context is finalized, if the remote_addr is not set
# default to using the host's address field as the remote address
if not play_context.remote_addr:
play_context.remote_addr = target_host.address
# We also add "magic" variables back into the variables dict to make sure
# a certain subset of variables exist.
play_context.update_vars(all_vars)
if target_host in self._active_connections:
connection = Connection(self._active_connections[target_host])
del self._active_connections[target_host]
else:
connection = plugin_loader.connection_loader.get(play_context.connection, play_context, os.devnull)
connection.set_options(task_keys=task.dump_attrs(), var_options=all_vars)
play_context.set_attributes_from_plugin(connection)
if connection:
try:
connection.reset()
msg = 'reset connection'
except ConnectionError as e:
# most likely socket is already closed
display.debug("got an error while closing persistent connection: %s" % e)
else:
msg = 'no connection, nothing to reset'
else:
raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
result = {'msg': msg}
if skipped:
result['skipped'] = True
result['skip_reason'] = skip_reason
else:
result['changed'] = False
display.vv("META: %s" % msg)
res = TaskResult(target_host, task, result)
if skipped:
self._tqm.send_callback('v2_runner_on_skipped', res)
return [res]
def get_hosts_left(self, iterator):
''' returns list of available hosts for this iterator by filtering out unreachables '''
hosts_left = []
for host in self._hosts_cache:
if host not in self._tqm._unreachable_hosts:
try:
hosts_left.append(self._inventory.hosts[host])
except KeyError:
hosts_left.append(self._inventory.get_host(host))
return hosts_left
def update_active_connections(self, results):
''' updates the current active persistent connections '''
for r in results:
if 'args' in r._task_fields:
socket_path = r._task_fields['args'].get('_ansible_socket')
if socket_path:
if r._host not in self._active_connections:
self._active_connections[r._host] = socket_path
class NextAction(object):
""" The next action after an interpreter's exit. """
REDO = 1
CONTINUE = 2
EXIT = 3
def __init__(self, result=EXIT):
self.result = result
class Debugger(cmd.Cmd):
prompt_continuous = '> ' # multiple lines
def __init__(self, task, host, task_vars, play_context, result, next_action):
# cmd.Cmd is old-style class
cmd.Cmd.__init__(self)
self.prompt = '[%s] %s (debug)> ' % (host, task)
self.intro = None
self.scope = {}
self.scope['task'] = task
self.scope['task_vars'] = task_vars
self.scope['host'] = host
self.scope['play_context'] = play_context
self.scope['result'] = result
self.next_action = next_action
def cmdloop(self):
try:
cmd.Cmd.cmdloop(self)
except KeyboardInterrupt:
pass
do_h = cmd.Cmd.do_help
def do_EOF(self, args):
"""Quit"""
return self.do_quit(args)
def do_quit(self, args):
"""Quit"""
display.display('User interrupted execution')
self.next_action.result = NextAction.EXIT
return True
do_q = do_quit
def do_continue(self, args):
"""Continue to next result"""
self.next_action.result = NextAction.CONTINUE
return True
do_c = do_continue
def do_redo(self, args):
"""Schedule task for re-execution. The re-execution may not be the next result"""
self.next_action.result = NextAction.REDO
return True
do_r = do_redo
def do_update_task(self, args):
"""Recreate the task from ``task._ds``, and template with updated ``task_vars``"""
templar = Templar(None, variables=self.scope['task_vars'])
task = self.scope['task']
task = task.load_data(task._ds)
task.post_validate(templar)
self.scope['task'] = task
do_u = do_update_task
def evaluate(self, args):
try:
return eval(args, globals(), self.scope)
except Exception:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def do_pprint(self, args):
"""Pretty Print"""
try:
result = self.evaluate(args)
display.display(pprint.pformat(result))
except Exception:
pass
do_p = do_pprint
def execute(self, args):
try:
code = compile(args + '\n', '<stdin>', 'single')
exec(code, globals(), self.scope)
except Exception:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else:
exc_type_name = t.__name__
display.display('***%s:%s' % (exc_type_name, repr(v)))
raise
def default(self, line):
try:
self.execute(line)
except Exception:
pass
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,049 |
A successful task redo from Ansible debugger does not reactivate previously active hosts if `any_errors_fatal=yes` and `run_once=yes`
|
### Summary
I have a playbook that drives a cluster software upgrade. Because I need to ensure that all hosts belonging to the cluster will be upgraded, I have set `any_errors_fatal=yes` at play level. Also, in order to perform on-the-fly fixes for any issue raised during the playbook execution, I have set `debugger=on_failure` at play level.
One of the upgrade steps requires an action to be performed on a single node of the cluster, so on that particular task, I defined `run_once=yes`.
However, under the combination of `any_errors_fatal=yes`, `debugger=on_failure` and `run_once=yes`, Ansible does not reactivate hosts that were active when a task intended to run only once fails and is repeated. Instead, only the host where the task is executed comes back to the list of active hosts. As a consequence, if the debugger is invoked, the cluster software upgrade becomes incomplete.
### Issue Type
Bug Report
### Component Name
lib/ansible/plugins/strategy/__init__.py
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.5]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/amg1127/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.9/site-packages/ansible
ansible collection location = /home/amg1127/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.9.7 (default, Aug 31 2021, 13:28:12) [GCC 11.1.0]
jinja version = 3.0.2
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
# (blank output) #
```
### OS / Environment
Arch Linux
### Steps to Reproduce
Please store the below content as `inventory.yml`:
```yaml (paste below)
---
all:
hosts:
host1:
ansible_connection: local
host2:
ansible_connection: local
```
And store the below content as `playbook.yml`:
```yaml (paste below)
---
- hosts: all
gather_facts: no
debugger: on_failed
any_errors_fatal: yes
tasks:
- name: Task 1
debug:
var: inventory_hostname
- name: Task 2
assert:
that: 'false'
run_once: yes
- name: Task 3
debug:
var: inventory_hostname
```
Run this command line: `ansible-playbook -i inventory.yml playbook.yml`
Once the Ansible debugger is invoked after the `Task 2` failure, update the `that` argument for the `assert` module so that the evaluation is always `true`. Afterwards, redo the failed task.
```
(debug)> task.args['that'] = 'true'
(debug)> r
```
### Expected Results
I expected `host2` to execute `Task 3` after the successful execution of `Task 2`.
```
PLAY [all] *************************************************************************************************************
TASK [Task 1] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
TASK [Task 2] **********************************************************************************************************
fatal: [host1]: FAILED! => {
"assertion": "false",
"changed": false,
"evaluated_to": false,
"msg": "Assertion failed"
}
[host1] TASK: Task 2 (debug)> task.args['that'] = 'true'
[host1] TASK: Task 2 (debug)> r
ok: [host1] => {
"changed": false,
"msg": "All assertions passed"
}
TASK [Task 3] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
PLAY RECAP *************************************************************************************************************
host1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
host2 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Actual Results
```console
PLAY [all] *************************************************************************************************************
TASK [Task 1] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
TASK [Task 2] **********************************************************************************************************
fatal: [host1]: FAILED! => {
"assertion": "false",
"changed": false,
"evaluated_to": false,
"msg": "Assertion failed"
}
[host1] TASK: Task 2 (debug)> task.args['that'] = 'true'
[host1] TASK: Task 2 (debug)> r
ok: [host1] => {
"changed": false,
"msg": "All assertions passed"
}
TASK [Task 3] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
PLAY RECAP *************************************************************************************************************
host1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
host2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76049
|
https://github.com/ansible/ansible/pull/76814
|
cad200406af5df3e5b5034b141578bc44b142f0d
|
29de2cccba67f1feda93f28e9056240de4e35c3c
| 2021-10-15T10:30:08Z |
python
| 2022-01-26T14:43:12Z |
test/integration/targets/debugger/aliases
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,049 |
A successful task redo from Ansible debugger does not reactivate previously active hosts if `any_errors_fatal=yes` and `run_once=yes`
|
### Summary
I have a playbook that drives a cluster software upgrade. Because I need to ensure that all hosts belonging to the cluster will be upgraded, I have set `any_errors_fatal=yes` at play level. Also, in order to perform on-the-fly fixes for any issue raised during the playbook execution, I have set `debugger=on_failure` at play level.
One of the upgrade steps requires an action to be performed on a single node of the cluster, so on that particular task, I defined `run_once=yes`.
However, under the combination of `any_errors_fatal=yes`, `debugger=on_failure` and `run_once=yes`, Ansible does not reactivate hosts that were active when a task intended to run only once fails and is repeated. Instead, only the host where the task is executed comes back to the list of active hosts. As a consequence, if the debugger is invoked, the cluster software upgrade becomes incomplete.
### Issue Type
Bug Report
### Component Name
lib/ansible/plugins/strategy/__init__.py
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.5]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/amg1127/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.9/site-packages/ansible
ansible collection location = /home/amg1127/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.9.7 (default, Aug 31 2021, 13:28:12) [GCC 11.1.0]
jinja version = 3.0.2
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
# (blank output) #
```
### OS / Environment
Arch Linux
### Steps to Reproduce
Please store the below content as `inventory.yml`:
```yaml (paste below)
---
all:
hosts:
host1:
ansible_connection: local
host2:
ansible_connection: local
```
And store the below content as `playbook.yml`:
```yaml (paste below)
---
- hosts: all
gather_facts: no
debugger: on_failed
any_errors_fatal: yes
tasks:
- name: Task 1
debug:
var: inventory_hostname
- name: Task 2
assert:
that: 'false'
run_once: yes
- name: Task 3
debug:
var: inventory_hostname
```
Run this command line: `ansible-playbook -i inventory.yml playbook.yml`
Once the Ansible debugger is invoked after the `Task 2` failure, update the `that` argument for the `assert` module so that the evaluation is always `true`. Afterwards, redo the failed task.
```
(debug)> task.args['that'] = 'true'
(debug)> r
```
### Expected Results
I expected `host2` to execute `Task 3` after the successful execution of `Task 2`.
```
PLAY [all] *************************************************************************************************************
TASK [Task 1] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
TASK [Task 2] **********************************************************************************************************
fatal: [host1]: FAILED! => {
"assertion": "false",
"changed": false,
"evaluated_to": false,
"msg": "Assertion failed"
}
[host1] TASK: Task 2 (debug)> task.args['that'] = 'true'
[host1] TASK: Task 2 (debug)> r
ok: [host1] => {
"changed": false,
"msg": "All assertions passed"
}
TASK [Task 3] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
PLAY RECAP *************************************************************************************************************
host1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
host2 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Actual Results
```console
PLAY [all] *************************************************************************************************************
TASK [Task 1] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
TASK [Task 2] **********************************************************************************************************
fatal: [host1]: FAILED! => {
"assertion": "false",
"changed": false,
"evaluated_to": false,
"msg": "Assertion failed"
}
[host1] TASK: Task 2 (debug)> task.args['that'] = 'true'
[host1] TASK: Task 2 (debug)> r
ok: [host1] => {
"changed": false,
"msg": "All assertions passed"
}
TASK [Task 3] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
PLAY RECAP *************************************************************************************************************
host1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
host2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76049
|
https://github.com/ansible/ansible/pull/76814
|
cad200406af5df3e5b5034b141578bc44b142f0d
|
29de2cccba67f1feda93f28e9056240de4e35c3c
| 2021-10-15T10:30:08Z |
python
| 2022-01-26T14:43:12Z |
test/integration/targets/debugger/inventory
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,049 |
A successful task redo from Ansible debugger does not reactivate previously active hosts if `any_errors_fatal=yes` and `run_once=yes`
|
### Summary
I have a playbook that drives a cluster software upgrade. Because I need to ensure that all hosts belonging to the cluster will be upgraded, I have set `any_errors_fatal=yes` at play level. Also, in order to perform on-the-fly fixes for any issue raised during the playbook execution, I have set `debugger=on_failure` at play level.
One of the upgrade steps requires an action to be performed on a single node of the cluster, so on that particular task, I defined `run_once=yes`.
However, under the combination of `any_errors_fatal=yes`, `debugger=on_failure` and `run_once=yes`, Ansible does not reactivate hosts that were active when a task intended to run only once fails and is repeated. Instead, only the host where the task is executed comes back to the list of active hosts. As a consequence, if the debugger is invoked, the cluster software upgrade becomes incomplete.
### Issue Type
Bug Report
### Component Name
lib/ansible/plugins/strategy/__init__.py
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.5]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/amg1127/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.9/site-packages/ansible
ansible collection location = /home/amg1127/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.9.7 (default, Aug 31 2021, 13:28:12) [GCC 11.1.0]
jinja version = 3.0.2
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
# (blank output) #
```
### OS / Environment
Arch Linux
### Steps to Reproduce
Please store the below content as `inventory.yml`:
```yaml (paste below)
---
all:
hosts:
host1:
ansible_connection: local
host2:
ansible_connection: local
```
And store the below content as `playbook.yml`:
```yaml (paste below)
---
- hosts: all
gather_facts: no
debugger: on_failed
any_errors_fatal: yes
tasks:
- name: Task 1
debug:
var: inventory_hostname
- name: Task 2
assert:
that: 'false'
run_once: yes
- name: Task 3
debug:
var: inventory_hostname
```
Run this command line: `ansible-playbook -i inventory.yml playbook.yml`
Once the Ansible debugger is invoked after the `Task 2` failure, update the `that` argument for the `assert` module so that the evaluation is always `true`. Afterwards, redo the failed task.
```
(debug)> task.args['that'] = 'true'
(debug)> r
```
### Expected Results
I expected `host2` to execute `Task 3` after the successful execution of `Task 2`.
```
PLAY [all] *************************************************************************************************************
TASK [Task 1] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
TASK [Task 2] **********************************************************************************************************
fatal: [host1]: FAILED! => {
"assertion": "false",
"changed": false,
"evaluated_to": false,
"msg": "Assertion failed"
}
[host1] TASK: Task 2 (debug)> task.args['that'] = 'true'
[host1] TASK: Task 2 (debug)> r
ok: [host1] => {
"changed": false,
"msg": "All assertions passed"
}
TASK [Task 3] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
PLAY RECAP *************************************************************************************************************
host1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
host2 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Actual Results
```console
PLAY [all] *************************************************************************************************************
TASK [Task 1] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
TASK [Task 2] **********************************************************************************************************
fatal: [host1]: FAILED! => {
"assertion": "false",
"changed": false,
"evaluated_to": false,
"msg": "Assertion failed"
}
[host1] TASK: Task 2 (debug)> task.args['that'] = 'true'
[host1] TASK: Task 2 (debug)> r
ok: [host1] => {
"changed": false,
"msg": "All assertions passed"
}
TASK [Task 3] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
PLAY RECAP *************************************************************************************************************
host1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
host2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76049
|
https://github.com/ansible/ansible/pull/76814
|
cad200406af5df3e5b5034b141578bc44b142f0d
|
29de2cccba67f1feda93f28e9056240de4e35c3c
| 2021-10-15T10:30:08Z |
python
| 2022-01-26T14:43:12Z |
test/integration/targets/debugger/runme.sh
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,049 |
A successful task redo from Ansible debugger does not reactivate previously active hosts if `any_errors_fatal=yes` and `run_once=yes`
|
### Summary
I have a playbook that drives a cluster software upgrade. Because I need to ensure that all hosts belonging to the cluster will be upgraded, I have set `any_errors_fatal=yes` at play level. Also, in order to perform on-the-fly fixes for any issue raised during the playbook execution, I have set `debugger=on_failure` at play level.
One of the upgrade steps requires an action to be performed on a single node of the cluster, so on that particular task, I defined `run_once=yes`.
However, under the combination of `any_errors_fatal=yes`, `debugger=on_failure` and `run_once=yes`, Ansible does not reactivate hosts that were active when a task intended to run only once fails and is repeated. Instead, only the host where the task is executed comes back to the list of active hosts. As a consequence, if the debugger is invoked, the cluster software upgrade becomes incomplete.
### Issue Type
Bug Report
### Component Name
lib/ansible/plugins/strategy/__init__.py
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.5]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/amg1127/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.9/site-packages/ansible
ansible collection location = /home/amg1127/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.9.7 (default, Aug 31 2021, 13:28:12) [GCC 11.1.0]
jinja version = 3.0.2
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
# (blank output) #
```
### OS / Environment
Arch Linux
### Steps to Reproduce
Please store the below content as `inventory.yml`:
```yaml (paste below)
---
all:
hosts:
host1:
ansible_connection: local
host2:
ansible_connection: local
```
And store the below content as `playbook.yml`:
```yaml (paste below)
---
- hosts: all
gather_facts: no
debugger: on_failed
any_errors_fatal: yes
tasks:
- name: Task 1
debug:
var: inventory_hostname
- name: Task 2
assert:
that: 'false'
run_once: yes
- name: Task 3
debug:
var: inventory_hostname
```
Run this command line: `ansible-playbook -i inventory.yml playbook.yml`
Once the Ansible debugger is invoked after the `Task 2` failure, update the `that` argument for the `assert` module so that the evaluation is always `true`. Afterwards, redo the failed task.
```
(debug)> task.args['that'] = 'true'
(debug)> r
```
### Expected Results
I expected `host2` to execute `Task 3` after the successful execution of `Task 2`.
```
PLAY [all] *************************************************************************************************************
TASK [Task 1] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
TASK [Task 2] **********************************************************************************************************
fatal: [host1]: FAILED! => {
"assertion": "false",
"changed": false,
"evaluated_to": false,
"msg": "Assertion failed"
}
[host1] TASK: Task 2 (debug)> task.args['that'] = 'true'
[host1] TASK: Task 2 (debug)> r
ok: [host1] => {
"changed": false,
"msg": "All assertions passed"
}
TASK [Task 3] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
PLAY RECAP *************************************************************************************************************
host1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
host2 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Actual Results
```console
PLAY [all] *************************************************************************************************************
TASK [Task 1] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
TASK [Task 2] **********************************************************************************************************
fatal: [host1]: FAILED! => {
"assertion": "false",
"changed": false,
"evaluated_to": false,
"msg": "Assertion failed"
}
[host1] TASK: Task 2 (debug)> task.args['that'] = 'true'
[host1] TASK: Task 2 (debug)> r
ok: [host1] => {
"changed": false,
"msg": "All assertions passed"
}
TASK [Task 3] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
PLAY RECAP *************************************************************************************************************
host1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
host2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76049
|
https://github.com/ansible/ansible/pull/76814
|
cad200406af5df3e5b5034b141578bc44b142f0d
|
29de2cccba67f1feda93f28e9056240de4e35c3c
| 2021-10-15T10:30:08Z |
python
| 2022-01-26T14:43:12Z |
test/integration/targets/debugger/test_run_once.py
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,049 |
A successful task redo from Ansible debugger does not reactivate previously active hosts if `any_errors_fatal=yes` and `run_once=yes`
|
### Summary
I have a playbook that drives a cluster software upgrade. Because I need to ensure that all hosts belonging to the cluster will be upgraded, I have set `any_errors_fatal=yes` at play level. Also, in order to perform on-the-fly fixes for any issue raised during the playbook execution, I have set `debugger=on_failure` at play level.
One of the upgrade steps requires an action to be performed on a single node of the cluster, so on that particular task, I defined `run_once=yes`.
However, under the combination of `any_errors_fatal=yes`, `debugger=on_failure` and `run_once=yes`, Ansible does not reactivate hosts that were active when a task intended to run only once fails and is repeated. Instead, only the host where the task is executed comes back to the list of active hosts. As a consequence, if the debugger is invoked, the cluster software upgrade becomes incomplete.
### Issue Type
Bug Report
### Component Name
lib/ansible/plugins/strategy/__init__.py
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.5]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/amg1127/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.9/site-packages/ansible
ansible collection location = /home/amg1127/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.9.7 (default, Aug 31 2021, 13:28:12) [GCC 11.1.0]
jinja version = 3.0.2
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
# (blank output) #
```
### OS / Environment
Arch Linux
### Steps to Reproduce
Please store the below content as `inventory.yml`:
```yaml (paste below)
---
all:
hosts:
host1:
ansible_connection: local
host2:
ansible_connection: local
```
And store the below content as `playbook.yml`:
```yaml (paste below)
---
- hosts: all
gather_facts: no
debugger: on_failed
any_errors_fatal: yes
tasks:
- name: Task 1
debug:
var: inventory_hostname
- name: Task 2
assert:
that: 'false'
run_once: yes
- name: Task 3
debug:
var: inventory_hostname
```
Run this command line: `ansible-playbook -i inventory.yml playbook.yml`
Once the Ansible debugger is invoked after the `Task 2` failure, update the `that` argument for the `assert` module so that the evaluation is always `true`. Afterwards, redo the failed task.
```
(debug)> task.args['that'] = 'true'
(debug)> r
```
### Expected Results
I expected `host2` to execute `Task 3` after the successful execution of `Task 2`.
```
PLAY [all] *************************************************************************************************************
TASK [Task 1] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
TASK [Task 2] **********************************************************************************************************
fatal: [host1]: FAILED! => {
"assertion": "false",
"changed": false,
"evaluated_to": false,
"msg": "Assertion failed"
}
[host1] TASK: Task 2 (debug)> task.args['that'] = 'true'
[host1] TASK: Task 2 (debug)> r
ok: [host1] => {
"changed": false,
"msg": "All assertions passed"
}
TASK [Task 3] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
PLAY RECAP *************************************************************************************************************
host1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
host2 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Actual Results
```console
PLAY [all] *************************************************************************************************************
TASK [Task 1] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
ok: [host2] => {
"inventory_hostname": "host2"
}
TASK [Task 2] **********************************************************************************************************
fatal: [host1]: FAILED! => {
"assertion": "false",
"changed": false,
"evaluated_to": false,
"msg": "Assertion failed"
}
[host1] TASK: Task 2 (debug)> task.args['that'] = 'true'
[host1] TASK: Task 2 (debug)> r
ok: [host1] => {
"changed": false,
"msg": "All assertions passed"
}
TASK [Task 3] **********************************************************************************************************
ok: [host1] => {
"inventory_hostname": "host1"
}
PLAY RECAP *************************************************************************************************************
host1 : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
host2 : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76049
|
https://github.com/ansible/ansible/pull/76814
|
cad200406af5df3e5b5034b141578bc44b142f0d
|
29de2cccba67f1feda93f28e9056240de4e35c3c
| 2021-10-15T10:30:08Z |
python
| 2022-01-26T14:43:12Z |
test/integration/targets/debugger/test_run_once_playbook.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,837 |
update `ansible.legacy` documentation to fix misleading docs
|
### Summary
The existing `ansible.legacy` docs are misleading:
https://docs.ansible.com/ansible/latest/dev_guide/migrating_roles.html#using-ansible-legacy-to-access-local-custom-modules-from-collections-based-roles
Specific feedback:
ansible.legacy will not access plugins in collections; that is only for NON-collection roles and plays.
Once moved into collections, roles CANNOT house any custom plugins; they MUST be migrated to the collection itself
They CAN use plugins provided by other roles, but that should work w/o adding namespacing
Last thing you want to do is set collection: [ansible.legacy] ... that can mess up your resolution big time
it might make more sense to rewrite that section from a general task perspective and leave out collection roles.
### Issue Type
Documentation Report
### Component Name
docs/docsite/rst/dev_guide/migrating_roles.rst
### Ansible Version
```console
$ ansible --version
2.10 and later
```
### Configuration
```console
$ ansible-config dump --only-changed
none
```
### OS / Environment
none
### Additional Information
Need to clarify the documentation to cover this information and also add how `ansible.legacy` can be used for local modules that overwrite `ansible.builtin` modules. For example, if a user can't use the existing `ansible.builtin.copy` module without some modification, they can create their modified 'copy' module and access this as `ansible.legacy.copy`.
`ansible.legacy` uses the "original" resolver for plugins and modules, searching for them in playbook-adjacent directories and in roles.
This only works for the `ansible.builtin` plugins and not for collections.
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76837
|
https://github.com/ansible/ansible/pull/76839
|
6bca0a5dc28037b3a4e8b0e6f45de6f0eb2f2086
|
e9af4195bee12dba5cae682fb6f8ec9bffaa0883
| 2022-01-24T20:56:45Z |
python
| 2022-01-26T17:17:18Z |
docs/docsite/rst/dev_guide/developing_locally.rst
|
.. _using_local_modules_and_plugins:
.. _developing_locally:
**********************************
Adding modules and plugins locally
**********************************
You can extend Ansible by adding custom modules or plugins. You can create them from scratch or copy existing ones for local use. You can store a local module or plugin on your Ansible control node and share it with your team or organization. You can also share plugins and modules by including them in a collection, then publishing the collection on Ansible Galaxy.
If you are using a local module or plugin but Ansible cannot find it, this page is all you need.
If you want to create a plugin or a module, see :ref:`developing_plugins`, :ref:`developing_modules_general` and :ref:`developing_collections`.
Extending Ansible with local modules and plugins offers shortcuts such as:
* You can copy other people's modules and plugins.
* When writing a new module, you can choose any programming language you like.
* You do not have to clone any repositories.
* You do not have to open a pull request.
* You do not have to add tests (though we recommend that you do!).
.. contents::
:local:
.. _modules_vs_plugins:
Modules and plugins: what is the difference?
============================================
If you are looking to add functionality to Ansible, you might wonder whether you need a module or a plugin. Here is a quick overview to help you understand what you need:
* Modules are reusable, standalone scripts that can be used by the Ansible API, the :command:`ansible` command, or the :command:`ansible-playbook` command. Modules provide a defined interface. Each module accepts arguments and returns information to Ansible by printing a JSON string to stdout before exiting. Modules execute on the target system (usually that means on a remote system) in separate processes. Modules are technically plugins, but for historical reasons we do not usually talk about "module plugins".
* :ref:`Plugins <working_with_plugins>` extend Ansible's core functionality and execute on the control node within the ``/usr/bin/ansible`` process. Plugins offer options and extensions for the core features of Ansible - transforming data, logging output, connecting to inventory, and more.
.. _use_collections:
Adding modules and plugins in collections
=========================================
You can add modules and plugins by :ref:`creating a collection <developing_collections>`. With a collection, you can use custom modules and plugins in any playbook or role. You can share your collection easily at any time through Ansible Galaxy.
The rest of this page describes other methods of using local, standalone modules or plugins.
.. _local_modules:
Adding a module outside of a collection
=======================================
You can configure Ansible to load standalone local modules in a specified location or locations and make them available to all playbooks and roles. Alternatively, you can make a non-collection local module available only to specific playbooks or roles.
Adding standalone local modules for all playbooks and roles
-----------------------------------------------------------
To load standalone local modules automatically and make them available to all playbooks and roles, use the :ref:`DEFAULT_MODULE_PATH` configuration setting or the ``ANSIBLE_LIBRARY`` environment variable. The configuration setting and environment variable take a colon-separated list, similar to ``$PATH``. You have two options:
* Add your standalone local module to one of the default configured locations. See the :ref:`DEFAULT_MODULE_PATH` configuration setting for details. Default locations may change without notice.
* Add the location of your standalone local module to an environment variable or configuration:
* the ``ANSIBLE_LIBRARY`` environment variable
* the :ref:`DEFAULT_MODULE_PATH` configuration setting
To view your current configuration settings for modules:
.. code-block:: text
ansible-config dump |grep DEFAULT_MODULE_PATH
After you save your module file in one of these locations, Ansible loads it and you can use it in any local task, playbook, or role.
To confirm that ``my_local_module`` is available:
* type ``ansible localhost -m my_local_module`` to see the output for that module, or
* type ``ansible-doc -t module my_local_module`` to see the documentation for that module
.. note::
Currently, the ``ansible-doc`` command can parse module documentation only from modules written in Python. If you have a module written in a programming language other than Python, please write the documentation in a Python file adjacent to the module file.
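For orientation, here is a minimal sketch of what such a standalone module file can look like (the module name ``my_local_module`` and its single ``name`` option are illustrative, not part of any shipped module):

.. code-block:: python

    #!/usr/bin/python
    # my_local_module.py - hypothetical standalone module saved on a configured module path
    from ansible.module_utils.basic import AnsibleModule


    def main():
        # Declare the options this module accepts; Ansible validates them on entry.
        module = AnsibleModule(argument_spec=dict(name=dict(type='str', required=True)))
        # Report the result back to Ansible as JSON on stdout and exit.
        module.exit_json(changed=False, msg='Hello, %s' % module.params['name'])


    if __name__ == '__main__':
        main()

Saved on one of the configured module paths, it can then be invoked with ``ansible localhost -m my_local_module -a 'name=world'``.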
Adding standalone local modules for selected playbooks or a single role
-----------------------------------------------------------------------
Ansible automatically loads all executable files from certain directories adjacent to your playbook or role as modules. Standalone modules in these locations are available only to the specific playbook, playbooks, or role in the parent directory.
* To use a standalone module only in a selected playbook or playbooks, store the module in a subdirectory called ``library`` in the directory that contains the playbook or playbooks.
* To use a standalone module only in a single role, store the module in a subdirectory called ``library`` within that role.
.. warning::
Roles contained in collections cannot contain any modules or other plugins. All plugins in a collection must live in the collection ``plugins`` directory tree. All plugins in that tree are accessible to all roles in the collection. If you are developing new modules, we recommend distributing them in :ref:`collections <developing_collections>`, not in roles.
.. _distributing_plugins:
.. _local_plugins:
Adding a non-module plugin locally outside of a collection
==========================================================
You can configure Ansible to load standalone local plugins in a specified location or locations and make them available to all playbooks and roles. Alternatively, you can make a standalone local plugin available only to specific playbooks or roles.
.. note::
Although modules are plugins, the naming patterns for directory names and environment variables that apply to other plugin types do not apply to modules. See :ref:`local_modules`.
Adding local non-module plugins for all playbooks and roles
-----------------------------------------------------------
To load standalone local plugins automatically and make them available to all playbooks and roles, use the configuration setting or environment variable for the type of plugin you are adding. These configuration settings and environment variables take a colon-separated list, similar to ``$PATH``. You have two options:
* Add your local plugin to one of the default configured locations. See :ref:`configuration settings <ansible_configuration_settings>` for details on the correct configuration setting for the plugin type. Default locations may change without notice.
* Add the location of your local plugin to an environment variable or configuration:
* the relevant ``ANSIBLE_plugin_type_PLUGINS`` environment variable - for example, ``$ANSIBLE_INVENTORY_PLUGINS`` or ``$ANSIBLE_VARS_PLUGINS``
* the relevant ``plugin_type_PATH`` configuration setting, most of which begin with ``DEFAULT_`` - for example, ``DEFAULT_CALLBACK_PLUGIN_PATH`` or ``DEFAULT_FILTER_PLUGIN_PATH`` or ``BECOME_PLUGIN_PATH``
To view your current configuration settings for non-module plugins:
.. code-block:: text
ansible-config dump |grep plugin_type_PATH
After your plugin file is added to one of these locations, Ansible loads it and you can use it in any local module, task, playbook, or role. For more information on environment variables and configuration settings, see :ref:`ansible_configuration_settings`.
To confirm that ``plugins/plugin_type/my_local_plugin`` is available:
* type ``ansible-doc -t <plugin_type> my_local_lookup_plugin`` to see the documentation for that plugin - for example, ``ansible-doc -t lookup my_local_lookup_plugin``
The ``ansible-doc`` command works for most plugin types, but not for action, filter, or test plugins. See :ref:`ansible-doc` for more details.
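As a sketch of the non-module case, a minimal filter plugin (the file name and filter name here are hypothetical) is a plain Python file exposing a ``FilterModule`` class:

.. code-block:: python

    # filter_plugins/my_filters.py - hypothetical playbook-adjacent filter plugin
    def reverse_upper(value):
        # Reverse the input string and upper-case it.
        return str(value)[::-1].upper()


    class FilterModule(object):
        # Ansible calls filters() to discover the filters this file provides.
        def filters(self):
            return {'reverse_upper': reverse_upper}

Once the file sits on a configured filter plugin path, ``{{ 'hello' | reverse_upper }}`` renders as ``OLLEH`` in a template or task.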
Adding standalone local plugins for selected playbooks or a single role
-----------------------------------------------------------------------
Ansible automatically loads all plugins from certain directories adjacent to your playbook or role, loading each type of plugin separately from a directory named for the type of plugin. Standalone plugins in these locations are available only to the specific playbook, playbooks, or role in the parent directory.
* To use a standalone plugin only in a selected playbook or playbooks, store the plugin in a subdirectory for the correct ``plugin_type`` (for example, ``callback_plugins`` or ``inventory_plugins``) in the directory that contains the playbooks. These directories must use the ``_plugins`` suffix. For a full list of plugin types, see :ref:`working_with_plugins`.
* To use a standalone plugin only in a single role, store the plugin in a subdirectory for the correct ``plugin_type`` (for example, ``cache_plugins`` or ``strategy_plugins``) within that role. When shipped as part of a role, the plugin is available as soon as the role is executed. These directories must use the ``_plugins`` suffix. For a full list of plugin types, see :ref:`working_with_plugins`.
.. warning::
Roles contained in collections cannot contain any plugins. All plugins in a collection must live in the collection ``plugins`` directory tree. All plugins in that tree are accessible to all roles in the collection. If you are developing new plugins, we recommend distributing them in :ref:`collections <developing_collections>`, not in roles.
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,837 |
update `ansible.legacy` documentation to fix misleading docs
|
### Summary
The existing `ansible.legacy` docs are misleading:
https://docs.ansible.com/ansible/latest/dev_guide/migrating_roles.html#using-ansible-legacy-to-access-local-custom-modules-from-collections-based-roles
Specific feedback:
ansible.legacy will not access plugins in collections; that is only for NON-collection roles and plays.
Once moved into collections, roles CANNOT house any custom plugins; they MUST be migrated to the collection itself
They CAN use plugins provided by other roles, but that should work w/o adding namespacing
Last thing you want to do is set collection: [ansible.legacy] ... that can mess up your resolution big time
it might make more sense to rewrite that section from a general task perspective and leave out collection roles.
### Issue Type
Documentation Report
### Component Name
docs/docsite/rst/dev_guide/migrating_roles.rst
### Ansible Version
```console
$ ansible --version
2.10 and later
```
### Configuration
```console
$ ansible-config dump --only-changed
none
```
### OS / Environment
none
### Additional Information
Need to clarify the documentation to cover this information and also add how `ansible.legacy` can be used for local modules that overwrite `ansible.builtin` modules. For example, if a user can't use the existing `ansible.builtin.copy` module without some modification, they can create their modified 'copy' module and access this as `ansible.legacy.copy`.
`ansible.legacy` uses the "original" resolver for plugins and modules, searching for them in playbook-adjacent directories and in roles.
This only works for the `ansible.builtin` plugins and not for collections.
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76837
|
https://github.com/ansible/ansible/pull/76839
|
6bca0a5dc28037b3a4e8b0e6f45de6f0eb2f2086
|
e9af4195bee12dba5cae682fb6f8ec9bffaa0883
| 2022-01-24T20:56:45Z |
python
| 2022-01-26T17:17:18Z |
docs/docsite/rst/dev_guide/migrating_roles.rst
|
.. _migrating_roles:
*************************************************
Migrating Roles to Roles in Collections on Galaxy
*************************************************
You can migrate any existing standalone role into a collection and host the collection on Galaxy. With Ansible collections, you can distribute many roles in a single cohesive unit of re-usable automation. Inside a collection, you can share custom plugins across all roles in the collection instead of duplicating them in each role's :file:`library/` directory.
You must migrate roles to collections if you want to distribute them as certified Ansible content.
.. note::
If you want to import your collection to Galaxy, you need a `Galaxy namespace <https://galaxy.ansible.com/docs/contributing/namespaces.html>`_.
See :ref:`developing_collections` for details on collections.
.. contents::
:local:
:depth: 1
Comparing standalone roles to collection roles
===============================================
:ref:`Standalone roles <playbooks_reuse_roles>` have the following directory structure:
.. code-block:: bash
:emphasize-lines: 5,7,8
role/
├── defaults
├── files
├── handlers
├── library
├── meta
├── module_utils
├── [*_plugins]
├── tasks
├── templates
├── tests
└── vars
The highlighted directories above will change when you migrate to a collection-based role. The collection directory structure includes a :file:`roles/` directory:
.. code-block:: bash
mynamespace/
└── mycollection/
├── docs/
├── galaxy.yml
├── plugins/
│ ├── modules/
│ │ └── module1.py
│ ├── inventory/
│ └── .../
├── README.md
├── roles/
│ ├── role1/
│ ├── role2/
│ └── .../
├── playbooks/
│ ├── files/
│ ├── vars/
│ ├── templates/
│ └── tasks/
└── tests/
When you migrate your role into a collection, you will need to use the Fully Qualified Collection Name (FQCN) to refer to its roles and plugins. The FQCN is the combination of the collection ``namespace``, collection ``name``, and the content item you are referring to.
So for example, in the above collection, the FQCN to access ``role1`` would be:
.. code-block:: text
mynamespace.mycollection.role1
A collection can contain one or more roles in the :file:`roles/` directory and these are almost identical to standalone roles, except you need to move plugins out of the individual roles, and use the :abbr:`FQCN (Fully Qualified Collection Name)` in some places, as detailed in the next section.
.. note::
In standalone roles, some of the plugin directories referenced their plugin types in the plural sense; this is not the case in collections.
.. _simple_roles_in_collections:
Migrating a role to a collection
=================================
To migrate from a standalone role that contains no plugins to a collection role:
1. Create a local :file:`ansible_collections` directory and ``cd`` to this new directory.
2. Create a collection. If you want to import this collection to Ansible Galaxy, you need a `Galaxy namespace <https://galaxy.ansible.com/docs/contributing/namespaces.html>`_.
.. code-block:: bash
$ ansible-galaxy collection init mynamespace.mycollection
This creates the collection directory structure.
3. Copy the standalone role directory into the :file:`roles/` subdirectory of the collection. Roles in collections cannot have hyphens in the role name. Rename any such roles to use underscores instead.
.. code-block:: bash
$ mkdir mynamespace/mycollection/roles/my_role/
$ cp -r /path/to/standalone/role/mynamespace/my_role/* mynamespace/mycollection/roles/my_role/
4. Update ``galaxy.yml`` to include any role dependencies.
5. Update the collection README.md file to add links to any role README.md files.
.. _complex_roles_in_collections:
Migrating a role that contains plugins to a collection
======================================================
To migrate from a standalone role that has plugins to a collection role:
1. Create a local :file:`ansible_collections` directory and ``cd`` to this new directory.
2. Create a collection. If you want to import this collection to Ansible Galaxy, you need a `Galaxy namespace <https://galaxy.ansible.com/docs/contributing/namespaces.html>`_.
.. code-block:: bash
$ ansible-galaxy collection init mynamespace.mycollection
This creates the collection directory structure.
3. Copy the standalone role directory into the :file:`roles/` subdirectory of the collection. Roles in collections cannot have hyphens in the role name. Rename any such roles to use underscores instead.
.. code-block:: bash
$ mkdir mynamespace/mycollection/roles/my_role/
$ cp -r /path/to/standalone/role/mynamespace/my_role/* mynamespace/mycollection/roles/my_role/
4. Move any modules to the :file:`plugins/modules/` directory.
.. code-block:: bash
$ mv mynamespace/mycollection/roles/my_role/library/* mynamespace/mycollection/plugins/modules/
5. Move any other plugins to the appropriate :file:`plugins/PLUGINTYPE/` directory. See :ref:`migrating_plugins_collection` for additional steps that may be required.
6. Update ``galaxy.yml`` to include any role dependencies.
7. Update the collection README.md file to add links to any role README.md files.
8. Change any references to the role to use the :abbr:`FQCN (Fully Qualified Collection Name)`.
.. code-block:: yaml
---
- name: example role by FQCN
hosts: some_host_pattern
tasks:
- name: import FQCN role from a collection
import_role:
name: mynamespace.mycollection.my_role
You can alternately use the ``collections`` keyword to simplify this:
.. code-block:: yaml
---
- name: example role by FQCN
hosts: some_host_pattern
collections:
- mynamespace.mycollection
tasks:
- name: import role from a collection
import_role:
name: my_role
.. _migrating_plugins_collection:
Migrating other role plugins to a collection
---------------------------------------------
To migrate other role plugins to a collection:
1. Move any nonmodule plugins to the appropriate :file:`plugins/PLUGINTYPE/` directory. The :file:`mynamespace/mycollection/plugins/README.md` file explains the types of plugins that the collection can contain within optionally created subdirectories.
.. code-block:: bash
$ mv mynamespace/mycollection/roles/my_role/filter_plugins/* mynamespace/mycollection/plugins/filter/
2. Update documentation to use the FQCN. Plugins that use ``doc_fragments`` need to use FQCN (for example, ``mydocfrag`` becomes ``mynamespace.mycollection.mydocfrag``).
3. Update relative imports. Relative imports in collections must start with a period. For example, :file:`./filename` and :file:`../asdfu/filestuff` work, but :file:`filename` in the same directory must be updated to :file:`./filename`, as shown below.
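The before/after shape of such an import is sketched here (the module and function names are hypothetical, used only for illustration):

.. code-block:: python

    # Standalone role: an implicit same-directory import worked.
    # from helpers import parse_config

    # Collection: the relative import must start with a period.
    from .helpers import parse_config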
If you have a custom ``module_utils`` or import from ``__init__.py``, you must also:
#. Change the Python namespace for custom ``module_utils`` to use the :abbr:`FQCN (Fully Qualified Collection Name)` along with the ``ansible_collections`` convention. See :ref:`update_module_utils_role`.
#. Change how you import from ``__init__.py``. See :ref:`update_init_role`.
.. _update_module_utils_role:
Updating ``module_utils``
^^^^^^^^^^^^^^^^^^^^^^^^^
If any of your custom modules use a custom module utility, once you migrate to a collection you cannot address the module utility in the top level ``ansible.module_utils`` Python namespace. Ansible does not merge content from collections into the Ansible internal Python namespace. Update any Python import statements that refer to custom module utilities when you migrate your custom content to collections. See :ref:`module_utils in collections <collection_module_utils>` for more details.
When coding with ``module_utils`` in a collection, the Python import statement needs to take into account the :abbr:`FQCN (Fully Qualified Collection Name)` along with the ``ansible_collections`` convention. The resulting Python import looks similar to the following example:
.. code-block:: text
from ansible_collections.{namespace}.{collectionname}.plugins.module_utils.{util} import {something}
.. note::
You need to follow the same rules in changing paths and using namespaced names for subclassed plugins.
The following example code snippets show a Python and a PowerShell module using both default Ansible ``module_utils`` and those provided by a collection. In this example the namespace is ``ansible_example`` and the collection is ``community``.
In the Python example the ``module_utils`` is ``helper`` and the :abbr:`FQCN (Fully Qualified Collection Name)` is ``ansible_example.community.plugins.module_utils.helper``:
.. code-block:: text
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible_collections.ansible_example.community.plugins.module_utils.helper import HelperRequest
argspec = dict(
name=dict(required=True, type='str'),
state=dict(choices=['present', 'absent'], required=True),
)
module = AnsibleModule(
argument_spec=argspec,
supports_check_mode=True
)
_request = HelperRequest(
module,
headers={"Content-Type": "application/json"},
data=data
)
In the PowerShell example the ``module_utils`` is ``hyperv`` and the :abbr:`FQCN (Fully Qualified Collection Name)` is ``ansible_example.community.plugins.module_utils.hyperv``:
.. code-block:: powershell
#!powershell
#AnsibleRequires -CSharpUtil Ansible.Basic
#AnsibleRequires -PowerShell ansible_collections.ansible_example.community.plugins.module_utils.hyperv
$spec = @{
name = @{ required = $true; type = "str" }
state = @{ required = $true; choices = @("present", "absent") }
}
$module = [Ansible.Basic.AnsibleModule]::Create($args, $spec)
Invoke-HyperVFunction -Name $module.Params.name
$module.ExitJson()
.. _update_init_role:
Importing from __init__.py
^^^^^^^^^^^^^^^^^^^^^^^^^^
Because of the way that the CPython interpreter does imports, combined with the way the Ansible plugin loader works, if your custom embedded module or plugin requires importing something from an :file:`__init__.py` file, that file also becomes part of your collection. You can either keep the content inside a standalone role or reference the file name explicitly in the Python import statement. The following example is an :file:`__init__.py` file that is part of a callback plugin found inside a collection named ``ansible_example.community``.
.. code-block:: python
from ansible_collections.ansible_example.community.plugins.callback.__init__ import CustomBaseClass
Example: Migrating a standalone role with plugins to a collection
-----------------------------------------------------------------
In this example, the standalone role ``my-standalone-role.webapp`` emulates a role name that contains dashes (which are not valid in collection role names). This standalone role contains a custom module in the ``library/`` directory called ``manage_webserver``.
.. code-block:: bash
my-standalone-role.webapp
├── defaults
├── files
├── handlers
├── library
├── meta
├── tasks
├── templates
├── tests
└── vars
1. Create a new collection, for example, ``acme.webserver``:
.. code-block:: bash
$ ansible-galaxy collection init acme.webserver
- Collection acme.webserver was created successfully
$ tree acme -d -L 2
acme
└── webserver
├── docs
├── plugins
└── roles
2. Create the ``webapp`` role inside the collection and copy all contents from the standalone role:
.. code-block:: bash
$ mkdir acme/webserver/roles/webapp
$ cp -r my-standalone-role.webapp/* acme/webserver/roles/webapp/
3. Move the ``manage_webserver`` module to its new home in ``acme/webserver/plugins/modules/``:
.. code-block:: bash
$ cp my-standalone-role.webapp/library/manage_webserver.py acme/webserver/plugins/modules/manage.py
.. note::
This example changed the original source file ``manage_webserver.py`` to the destination file ``manage.py``. This is optional but the :abbr:`FQCN (Fully Qualified Collection Name)` provides the ``webserver`` context as ``acme.webserver.manage``.
4. Change ``manage_webserver`` to ``acme.webserver.manage`` in :file:`tasks/` files in the role (for example, ``my-standalone-role.webapp/tasks/main.yml``) and any use of the original module name.
.. note::
This name change is only required if you changed the original module name, but it illustrates that content referenced by :abbr:`FQCN (Fully Qualified Collection Name)` can offer context and in turn can make module and plugin names shorter. If you anticipate using these modules independent of the role, keep the original naming conventions. Users can add the :ref:`collections keyword <collections_using_playbook>` in their playbooks. Typically roles are an abstraction layer and users won't use components of the role independently.
Example: Supporting standalone roles and migrated collection roles in a downstream RPM
---------------------------------------------------------------------------------------
A standalone role can co-exist with its collection role counterpart (for example, as part of a support lifecycle of a product). This should only be done for a transition period, but the two can coexist downstream in packages such as RPMs. For example, the RHEL system roles could coexist with an `example of a RHEL system roles collection <https://github.com/maxamillion/collection-rhel-system-roles>`_ and provide backwards compatibility with the downstream RPM.
This section walks through an example creating this coexistence in a downstream RPM and requires Ansible 2.9.0 or later.
To deliver a role as both a standalone role and a collection role:
#. Place the collection in :file:`/usr/share/ansible/collections/ansible_collections/`.
#. Copy the contents of the role inside the collection into a directory named after the standalone role and place the standalone role in :file:`/usr/share/ansible/roles/`.
All previously bundled modules and plugins used in the standalone role are now referenced by :abbr:`FQCN (Fully Qualified Collection Name)` so even though they are no longer embedded, they can be found from the collection contents. This is an example of how the content inside the collection is a unique entity and does not have to be bound to a role or otherwise. You could alternately create two separate collections: one for the modules and plugins and another for the standalone role to migrate to. The role must use the modules and plugins as :abbr:`FQCN (Fully Qualified Collection Name)`.
The following is an example RPM spec file that accomplishes this using this example content:
.. code-block:: text
Name: acme-ansible-content
Summary: Ansible Collection for deploying and configuring ACME webapp
Version: 1.0.0
Release: 1%{?dist}
License: GPLv3+
Source0: acme-webserver-1.0.0.tar.gz
Url: https://github.com/acme/webserver-ansible-collection
BuildArch: noarch
%global roleprefix my-standalone-role.
%global collection_namespace acme
%global collection_name webserver
%global collection_dir %{_datadir}/ansible/collections/ansible_collections/%{collection_namespace}/%{collection_name}
%description
Ansible Collection and standalone role (for backward compatibility and migration) to deploy, configure, and manage the ACME webapp software.
%prep
%setup -qc
%build
%install
mkdir -p %{buildroot}/%{collection_dir}
cp -r ./* %{buildroot}/%{collection_dir}/
mkdir -p %{buildroot}/%{_datadir}/ansible/roles
for role in %{buildroot}/%{collection_dir}/roles/*
do
cp -pR ${role} %{buildroot}/%{_datadir}/ansible/roles/%{roleprefix}$(basename ${role})
mkdir -p %{buildroot}/%{_pkgdocdir}/$(basename ${role})
for docfile in README.md COPYING LICENSE
do
if [ -f ${role}/${docfile} ]
then
cp -p ${role}/${docfile} %{buildroot}/%{_pkgdocdir}/$(basename ${role})/${docfile}
fi
done
done
%files
%dir %{_datadir}/ansible
%dir %{_datadir}/ansible/roles
%dir %{_datadir}/ansible/collections
%dir %{_datadir}/ansible/collections/ansible_collections
%{_datadir}/ansible/roles/
%doc %{_pkgdocdir}/*/README.md
%doc %{_datadir}/ansible/roles/%{roleprefix}*/README.md
%{collection_dir}
%doc %{collection_dir}/roles/*/README.md
%license %{_pkgdocdir}/*/COPYING
%license %{_pkgdocdir}/*/LICENSE
.. _using_ansible_legacy:
Using ``ansible.legacy`` to access local custom modules from collections-based roles
=====================================================================================
Some roles use :ref:`local custom modules <developing_locally>` that are not part of the role itself. When you move these roles into collections, they can no longer find those custom plugins. You can add the synthetic collection ``ansible.legacy`` to enable legacy behavior and find those custom plugins. Adding ``ansible.legacy`` configures your role to search the pre-collections default paths for modules and plugins.
To enable a role hosted in a collection to find legacy custom modules and other plugins hosted locally:
Edit the role's ``meta/main.yml`` and add the ``ansible.legacy`` collection to your collection-hosted role to enable the use of legacy custom modules and plugins for all tasks:
.. code-block:: yaml
collections:
- ansible.legacy
Alternatively, you can update the tasks directly by changing ``local_module_name`` to ``ansible.legacy.local_module_name``.
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 67,551 |
ansible-test should be able to report a version
|
##### SUMMARY
```
(ansible_base) [vagrant@centos8 ~]$ ansible-test --version
ERROR: The current working directory must be at or below:
- an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/
Current working directory: /home/vagrant
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
ansible-test
|
https://github.com/ansible/ansible/issues/67551
|
https://github.com/ansible/ansible/pull/76866
|
07bcd13e6fdc545e4fcbfd9fd5c0052a063a0df6
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
| 2020-02-18T21:52:37Z |
python
| 2022-01-27T20:32:11Z |
changelogs/fragments/ansible-test-help-cwd.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 67,551 |
ansible-test should be able to report a version
|
##### SUMMARY
```
(ansible_base) [vagrant@centos8 ~]$ ansible-test --version
ERROR: The current working directory must be at or below:
- an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/
Current working directory: /home/vagrant
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
ansible-test
|
https://github.com/ansible/ansible/issues/67551
|
https://github.com/ansible/ansible/pull/76866
|
07bcd13e6fdc545e4fcbfd9fd5c0052a063a0df6
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
| 2020-02-18T21:52:37Z |
python
| 2022-01-27T20:32:11Z |
test/integration/targets/ansible-test/collection-tests/unsupported-directory.sh
|
#!/usr/bin/env bash
set -eux -o pipefail
cd "${WORK_DIR}"
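# ansible-test is expected to fail here (WORK_DIR is neither a collection nor
# the ansible source tree), but it must fail with a clean error message on
# stderr rather than a Python traceback.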
if ansible-test --help 1>stdout 2>stderr; then
echo "ansible-test did not fail"
exit 1
fi
grep '^Current working directory: ' stderr
if grep raise stderr; then
echo "ansible-test failed with a traceback instead of an error message"
exit 2
fi
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 67,551 |
ansible-test should be able to report a version
|
##### SUMMARY
```
(ansible_base) [vagrant@centos8 ~]$ ansible-test --version
ERROR: The current working directory must be at or below:
- an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/
Current working directory: /home/vagrant
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
ansible-test
|
https://github.com/ansible/ansible/issues/67551
|
https://github.com/ansible/ansible/pull/76866
|
07bcd13e6fdc545e4fcbfd9fd5c0052a063a0df6
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
| 2020-02-18T21:52:37Z |
python
| 2022-01-27T20:32:11Z |
test/lib/ansible_test/_internal/__init__.py
|
"""Test runner for all Ansible tests."""
from __future__ import annotations
import os
import sys
import typing as t
# This import should occur as early as possible.
# It must occur before subprocess has been imported anywhere in the current process.
from .init import (
CURRENT_RLIMIT_NOFILE,
)
from .util import (
ApplicationError,
display,
MAXFD,
)
from .delegation import (
delegate,
)
from .executor import (
ApplicationWarning,
Delegate,
ListTargets,
)
from .timeout import (
configure_timeout,
)
from .data import (
data_context,
)
from .util_common import (
CommonConfig,
)
from .cli import (
parse_args,
)
from .provisioning import (
PrimeContainers,
)
def main(cli_args=None): # type: (t.Optional[t.List[str]]) -> None
"""Main program function."""
try:
os.chdir(data_context().content.root)
args = parse_args(cli_args)
config = args.config(args) # type: CommonConfig
display.verbosity = config.verbosity
display.truncate = config.truncate
display.redact = config.redact
display.color = config.color
display.info_stderr = config.info_stderr
configure_timeout(config)
display.info('RLIMIT_NOFILE: %s' % (CURRENT_RLIMIT_NOFILE,), verbosity=2)
display.info('MAXFD: %d' % MAXFD, verbosity=2)
delegate_args = None
target_names = None
try:
args.func(config)
except PrimeContainers:
pass
except ListTargets as ex:
# save target_names for use once we exit the exception handler
target_names = ex.target_names
except Delegate as ex:
# save delegation args for use once we exit the exception handler
delegate_args = (ex.host_state, ex.exclude, ex.require)
if delegate_args:
# noinspection PyTypeChecker
delegate(config, *delegate_args)
if target_names:
for target_name in target_names:
print(target_name) # info goes to stderr, this should be on stdout
display.review_warnings()
config.success = True
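# Exit-code convention implemented below: 0 for a run that only produced a
# warning, 1 for an application error, 2 when interrupted by the user, and
# 3 when the output pipe was closed early.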
except ApplicationWarning as ex:
display.warning(u'%s' % ex)
sys.exit(0)
except ApplicationError as ex:
display.error(u'%s' % ex)
sys.exit(1)
except KeyboardInterrupt:
sys.exit(2)
except BrokenPipeError:
sys.exit(3)
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 67,551 |
ansible-test should be able to report a version
|
##### SUMMARY
```
(ansible_base) [vagrant@centos8 ~]$ ansible-test --version
ERROR: The current working directory must be at or below:
- an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/
Current working directory: /home/vagrant
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
ansible-test
|
https://github.com/ansible/ansible/issues/67551
|
https://github.com/ansible/ansible/pull/76866
|
07bcd13e6fdc545e4fcbfd9fd5c0052a063a0df6
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
| 2020-02-18T21:52:37Z |
python
| 2022-01-27T20:32:11Z |
test/lib/ansible_test/_internal/cli/__init__.py
|
"""Command line parsing."""
from __future__ import annotations
import argparse
import os
import sys
import typing as t
from .argparsing import (
CompositeActionCompletionFinder,
)
from .commands import (
do_commands,
)
from .compat import (
HostSettings,
convert_legacy_args,
)
def parse_args(argv=None): # type: (t.Optional[t.List[str]]) -> argparse.Namespace
"""Parse command line arguments."""
completer = CompositeActionCompletionFinder()
if completer.enabled:
epilog = 'Tab completion available using the "argcomplete" python package.'
else:
epilog = 'Install the "argcomplete" python package to enable tab completion.'
parser = argparse.ArgumentParser(prog='ansible-test', epilog=epilog)
do_commands(parser, completer)
completer(
parser,
always_complete_options=False,
)
if argv is None:
argv = sys.argv[1:]
else:
argv = argv[1:]
args = parser.parse_args(argv)
if args.explain and not args.verbosity:
args.verbosity = 1
if args.no_environment:
pass
elif args.host_path:
args.host_settings = HostSettings.deserialize(os.path.join(args.host_path, 'settings.dat'))
else:
args.host_settings = convert_legacy_args(argv, args, args.target_mode)
args.host_settings.apply_defaults()
return args
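# Illustrative usage (an assumption for demonstration, not part of this module):
#   args = parse_args(['ansible-test', 'sanity', '--python', '3.10'])
# The leading program name is stripped above before argparse sees the arguments.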
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 67,551 |
ansible-test should be able to report a version
|
##### SUMMARY
```
(ansible_base) [vagrant@centos8 ~]$ ansible-test --version
ERROR: The current working directory must be at or below:
- an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/
Current working directory: /home/vagrant
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
ansible-test
|
https://github.com/ansible/ansible/issues/67551
|
https://github.com/ansible/ansible/pull/76866
|
07bcd13e6fdc545e4fcbfd9fd5c0052a063a0df6
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
| 2020-02-18T21:52:37Z |
python
| 2022-01-27T20:32:11Z |
test/lib/ansible_test/_internal/cli/environments.py
|
"""Command line parsing for test environments."""
from __future__ import annotations
import argparse
import enum
import functools
import typing as t
from ..constants import (
CONTROLLER_PYTHON_VERSIONS,
REMOTE_PROVIDERS,
SECCOMP_CHOICES,
SUPPORTED_PYTHON_VERSIONS,
)
from ..completion import (
docker_completion,
network_completion,
remote_completion,
windows_completion,
filter_completion,
)
from ..cli.argparsing import (
CompositeAction,
CompositeActionCompletionFinder,
)
from ..cli.argparsing.actions import (
EnumAction,
)
from ..cli.actions import (
DelegatedControllerAction,
NetworkSshTargetAction,
NetworkTargetAction,
OriginControllerAction,
PosixSshTargetAction,
PosixTargetAction,
SanityPythonTargetAction,
UnitsPythonTargetAction,
WindowsSshTargetAction,
WindowsTargetAction,
)
from ..cli.compat import (
TargetMode,
)
from ..config import (
TerminateMode,
)
from .completers import (
complete_choices,
)
from .converters import (
key_value_type,
)
from ..ci import (
get_ci_provider,
)
class ControllerMode(enum.Enum):
"""Type of provisioning to use for the controller."""
NO_DELEGATION = enum.auto()
ORIGIN = enum.auto()
DELEGATED = enum.auto()
def add_environments(
parser, # type: argparse.ArgumentParser
completer, # type: CompositeActionCompletionFinder
controller_mode, # type: ControllerMode
target_mode, # type: TargetMode
): # type: (...) -> None
"""Add arguments for the environments used to run ansible-test and commands it invokes."""
no_environment = controller_mode == ControllerMode.NO_DELEGATION and target_mode == TargetMode.NO_TARGETS
parser.set_defaults(no_environment=no_environment)
if no_environment:
return
parser.set_defaults(target_mode=target_mode)
add_global_options(parser, controller_mode)
add_legacy_environment_options(parser, controller_mode, target_mode)
action_types = add_composite_environment_options(parser, completer, controller_mode, target_mode)
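# Assemble the help epilog from the documentation sections contributed by each
# composite action type registered above, preserving their original order.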
sections = [f'{heading}\n{content}'
for action_type, documentation_state in CompositeAction.documentation_state.items() if action_type in action_types
for heading, content in documentation_state.sections.items()]
if not get_ci_provider().supports_core_ci_auth():
sections.append('Remote provisioning options have been hidden since no Ansible Core CI API key was found.')
parser.formatter_class = argparse.RawDescriptionHelpFormatter
parser.epilog = '\n\n'.join(sections)
def add_global_options(
parser, # type: argparse.ArgumentParser
controller_mode, # type: ControllerMode
):
"""Add global options for controlling the test environment that work with both the legacy and composite options."""
global_parser = t.cast(argparse.ArgumentParser, parser.add_argument_group(title='global environment arguments'))
global_parser.add_argument(
'--containers',
metavar='JSON',
help=argparse.SUPPRESS,
)
global_parser.add_argument(
'--pypi-proxy',
action='store_true',
help=argparse.SUPPRESS,
)
global_parser.add_argument(
'--pypi-endpoint',
metavar='URI',
help=argparse.SUPPRESS,
)
global_parser.add_argument(
'--requirements',
action='store_true',
default=False,
help='install command requirements',
)
global_parser.add_argument(
'--no-pip-check',
action='store_true',
help=argparse.SUPPRESS, # deprecated, kept for now (with a warning) for backwards compatibility
)
add_global_remote(global_parser, controller_mode)
add_global_docker(global_parser, controller_mode)
def add_composite_environment_options(
parser, # type: argparse.ArgumentParser
completer, # type: CompositeActionCompletionFinder
controller_mode, # type: ControllerMode
target_mode, # type: TargetMode
): # type: (...) -> t.List[t.Type[CompositeAction]]
"""Add composite options for controlling the test environment."""
composite_parser = t.cast(argparse.ArgumentParser, parser.add_argument_group(
title='composite environment arguments (mutually exclusive with "environment arguments" above)'))
composite_parser.add_argument(
'--host-path',
help=argparse.SUPPRESS,
)
action_types = [] # type: t.List[t.Type[CompositeAction]]
def register_action_type(action_type): # type: (t.Type[CompositeAction]) -> t.Type[CompositeAction]
"""Register the provided composite action type and return it."""
action_types.append(action_type)
return action_type
if controller_mode == ControllerMode.NO_DELEGATION:
composite_parser.set_defaults(controller=None)
else:
composite_parser.add_argument(
'--controller',
metavar='OPT',
action=register_action_type(DelegatedControllerAction if controller_mode == ControllerMode.DELEGATED else OriginControllerAction),
help='configuration for the controller',
).completer = completer.completer
if target_mode == TargetMode.NO_TARGETS:
composite_parser.set_defaults(targets=[])
elif target_mode == TargetMode.SHELL:
group = composite_parser.add_mutually_exclusive_group()
group.add_argument(
'--target-posix',
metavar='OPT',
action=register_action_type(PosixSshTargetAction),
help='configuration for the target',
).completer = completer.completer
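# When no Ansible Core CI API key is available, keep the remote target options
# functional but hide them from --help output via argparse.SUPPRESS.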
suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS
group.add_argument(
'--target-windows',
metavar='OPT',
action=WindowsSshTargetAction if suppress else register_action_type(WindowsSshTargetAction),
help=suppress or 'configuration for the target',
).completer = completer.completer
group.add_argument(
'--target-network',
metavar='OPT',
action=NetworkSshTargetAction if suppress else register_action_type(NetworkSshTargetAction),
help=suppress or 'configuration for the target',
).completer = completer.completer
else:
if target_mode.multiple_pythons:
target_option = '--target-python'
target_help = 'configuration for the target python interpreter(s)'
elif target_mode == TargetMode.POSIX_INTEGRATION:
target_option = '--target'
target_help = 'configuration for the target'
else:
target_option = '--target'
target_help = 'configuration for the target(s)'
target_actions = {
TargetMode.POSIX_INTEGRATION: PosixTargetAction,
TargetMode.WINDOWS_INTEGRATION: WindowsTargetAction,
TargetMode.NETWORK_INTEGRATION: NetworkTargetAction,
TargetMode.SANITY: SanityPythonTargetAction,
TargetMode.UNITS: UnitsPythonTargetAction,
}
target_action = target_actions[target_mode]
composite_parser.add_argument(
target_option,
metavar='OPT',
action=register_action_type(target_action),
help=target_help,
).completer = completer.completer
return action_types
def add_legacy_environment_options(
parser, # type: argparse.ArgumentParser
controller_mode, # type: ControllerMode
target_mode, # type: TargetMode
):
"""Add legacy options for controlling the test environment."""
# noinspection PyTypeChecker
environment = parser.add_argument_group(
title='environment arguments (mutually exclusive with "composite environment arguments" below)') # type: argparse.ArgumentParser
add_environments_python(environment, target_mode)
add_environments_host(environment, controller_mode, target_mode)
def add_environments_python(
environments_parser, # type: argparse.ArgumentParser
target_mode, # type: TargetMode
): # type: (...) -> None
"""Add environment arguments to control the Python version(s) used."""
if target_mode.has_python:
python_versions = SUPPORTED_PYTHON_VERSIONS
else:
python_versions = CONTROLLER_PYTHON_VERSIONS
environments_parser.add_argument(
'--python',
metavar='X.Y',
choices=python_versions + ('default',),
help='python version: %s' % ', '.join(python_versions),
)
environments_parser.add_argument(
'--python-interpreter',
metavar='PATH',
help='path to the python interpreter',
)
def add_environments_host(
environments_parser, # type: argparse.ArgumentParser
controller_mode, # type: ControllerMode
target_mode # type: TargetMode
): # type: (...) -> None
"""Add environment arguments for the given host and argument modes."""
# noinspection PyTypeChecker
environments_exclusive_group = environments_parser.add_mutually_exclusive_group() # type: argparse.ArgumentParser
add_environment_local(environments_exclusive_group)
add_environment_venv(environments_exclusive_group, environments_parser)
if controller_mode == ControllerMode.DELEGATED:
add_environment_remote(environments_exclusive_group, environments_parser, target_mode)
add_environment_docker(environments_exclusive_group, environments_parser, target_mode)
if target_mode == TargetMode.WINDOWS_INTEGRATION:
add_environment_windows(environments_parser)
if target_mode == TargetMode.NETWORK_INTEGRATION:
add_environment_network(environments_parser)
def add_environment_network(
environments_parser, # type: argparse.ArgumentParser
): # type: (...) -> None
"""Add environment arguments for running on a windows host."""
environments_parser.add_argument(
'--platform',
metavar='PLATFORM',
action='append',
help='network platform/version',
).completer = complete_network_platform
environments_parser.add_argument(
'--platform-collection',
type=key_value_type,
metavar='PLATFORM=COLLECTION',
action='append',
help='collection used to test platform',
).completer = complete_network_platform_collection
environments_parser.add_argument(
'--platform-connection',
type=key_value_type,
metavar='PLATFORM=CONNECTION',
action='append',
help='connection used to test platform',
).completer = complete_network_platform_connection
environments_parser.add_argument(
'--inventory',
metavar='PATH',
help='path to inventory used for tests',
)
def add_environment_windows(
environments_parser, # type: argparse.ArgumentParser
): # type: (...) -> None
"""Add environment arguments for running on a windows host."""
environments_parser.add_argument(
'--windows',
metavar='VERSION',
action='append',
help='windows version',
).completer = complete_windows
environments_parser.add_argument(
'--inventory',
metavar='PATH',
help='path to inventory used for tests',
)
def add_environment_local(
exclusive_parser, # type: argparse.ArgumentParser
): # type: (...) -> None
"""Add environment arguments for running on the local (origin) host."""
exclusive_parser.add_argument(
'--local',
action='store_true',
help='run from the local environment',
)
def add_environment_venv(
exclusive_parser, # type: argparse.ArgumentParser
environments_parser, # type: argparse.ArgumentParser
): # type: (...) -> None
"""Add environment arguments for running in ansible-test managed virtual environments."""
exclusive_parser.add_argument(
'--venv',
action='store_true',
help='run from a virtual environment',
)
environments_parser.add_argument(
'--venv-system-site-packages',
action='store_true',
help='enable system site packages')
def add_global_docker(
parser, # type: argparse.ArgumentParser
controller_mode, # type: ControllerMode
): # type: (...) -> None
"""Add global options for Docker."""
if controller_mode != ControllerMode.DELEGATED:
parser.set_defaults(
docker_no_pull=False,
docker_network=None,
docker_terminate=None,
prime_containers=False,
)
return
parser.add_argument(
'--docker-no-pull',
action='store_true',
help=argparse.SUPPRESS, # deprecated, kept for now (with a warning) for backwards compatibility
)
parser.add_argument(
'--docker-network',
metavar='NET',
help='run using the specified network',
)
parser.add_argument(
'--docker-terminate',
metavar='T',
default=TerminateMode.ALWAYS,
type=TerminateMode,
action=EnumAction,
help='terminate the container: %(choices)s (default: %(default)s)',
)
parser.add_argument(
'--prime-containers',
action='store_true',
help='download containers without running tests',
)
def add_environment_docker(
exclusive_parser, # type: argparse.ArgumentParser
environments_parser, # type: argparse.ArgumentParser
target_mode, # type: TargetMode
): # type: (...) -> None
"""Add environment arguments for running in docker containers."""
if target_mode in (TargetMode.POSIX_INTEGRATION, TargetMode.SHELL):
docker_images = sorted(filter_completion(docker_completion()))
else:
docker_images = sorted(filter_completion(docker_completion(), controller_only=True))
exclusive_parser.add_argument(
'--docker',
metavar='IMAGE',
nargs='?',
const='default',
help='run from a docker container',
).completer = functools.partial(complete_choices, docker_images)
environments_parser.add_argument(
'--docker-privileged',
action='store_true',
help='run docker container in privileged mode',
)
environments_parser.add_argument(
'--docker-seccomp',
metavar='SC',
choices=SECCOMP_CHOICES,
help='set seccomp confinement for the test container: %(choices)s',
)
environments_parser.add_argument(
'--docker-memory',
metavar='INT',
type=int,
help='memory limit for docker in bytes',
)
def add_global_remote(
parser, # type: argparse.ArgumentParser
controller_mode, # type: ControllerMode
): # type: (...) -> None
"""Add global options for remote instances."""
if controller_mode != ControllerMode.DELEGATED:
parser.set_defaults(
remote_stage=None,
remote_endpoint=None,
remote_terminate=None,
)
return
suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS
parser.add_argument(
'--remote-stage',
metavar='STAGE',
default='prod',
help=suppress or 'remote stage to use: prod, dev',
).completer = complete_remote_stage
parser.add_argument(
'--remote-endpoint',
metavar='EP',
help=suppress or 'remote provisioning endpoint to use',
)
parser.add_argument(
'--remote-terminate',
metavar='T',
default=TerminateMode.NEVER,
type=TerminateMode,
action=EnumAction,
help=suppress or 'terminate the remote instance: %(choices)s (default: %(default)s)',
)
def add_environment_remote(
exclusive_parser, # type: argparse.ArgumentParser
environments_parser, # type: argparse.ArgumentParser
target_mode, # type: TargetMode
): # type: (...) -> None
"""Add environment arguments for running in ansible-core-ci provisioned remote virtual machines."""
if target_mode == TargetMode.POSIX_INTEGRATION:
remote_platforms = get_remote_platform_choices()
elif target_mode == TargetMode.SHELL:
remote_platforms = sorted(set(get_remote_platform_choices()) | set(get_windows_platform_choices()))
else:
remote_platforms = get_remote_platform_choices(True)
suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS
exclusive_parser.add_argument(
'--remote',
metavar='NAME',
help=suppress or 'run from a remote instance',
).completer = functools.partial(complete_choices, remote_platforms)
environments_parser.add_argument(
'--remote-provider',
metavar='PR',
choices=REMOTE_PROVIDERS,
help=suppress or 'remote provider to use: %(choices)s',
)
def complete_remote_stage(prefix: str, **_) -> t.List[str]:
"""Return a list of supported stages matching the given prefix."""
return [stage for stage in ('prod', 'dev') if stage.startswith(prefix)]
def complete_windows(prefix: str, parsed_args: argparse.Namespace, **_) -> t.List[str]:
"""Return a list of supported Windows versions matching the given prefix, excluding versions already parsed from the command line."""
return [i for i in get_windows_version_choices() if i.startswith(prefix) and (not parsed_args.windows or i not in parsed_args.windows)]
def complete_network_platform(prefix: str, parsed_args: argparse.Namespace, **_) -> t.List[str]:
"""Return a list of supported network platforms matching the given prefix, excluding platforms already parsed from the command line."""
images = sorted(filter_completion(network_completion()))
return [i for i in images if i.startswith(prefix) and (not parsed_args.platform or i not in parsed_args.platform)]
def complete_network_platform_collection(prefix: str, parsed_args: argparse.Namespace, **_) -> t.List[str]:
"""Return a list of supported network platforms matching the given prefix, excluding collection platforms already parsed from the command line."""
left = prefix.split('=')[0]
images = sorted(set(image.platform for image in filter_completion(network_completion()).values()))
return [i + '=' for i in images if i.startswith(left) and (not parsed_args.platform_collection or i not in [x[0] for x in parsed_args.platform_collection])]
def complete_network_platform_connection(prefix: str, parsed_args: argparse.Namespace, **_) -> t.List[str]:
"""Return a list of supported network platforms matching the given prefix, excluding connection platforms already parsed from the command line."""
left = prefix.split('=')[0]
images = sorted(set(image.platform for image in filter_completion(network_completion()).values()))
return [i + '=' for i in images if i.startswith(left) and (not parsed_args.platform_connection or i not in [x[0] for x in parsed_args.platform_connection])]
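# Illustrative behavior of the PLATFORM=VALUE completers above (hypothetical
# values): with prefix 'eos' and available platforms ['eos', 'ios'], they
# return ['eos='], leaving completion of the right-hand side to other logic.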
def get_remote_platform_choices(controller=False): # type: (bool) -> t.List[str]
"""Return a list of supported remote platforms matching the given prefix."""
return sorted(filter_completion(remote_completion(), controller_only=controller))
def get_windows_platform_choices(): # type: () -> t.List[str]
"""Return a list of supported Windows versions matching the given prefix."""
return sorted(f'windows/{windows.version}' for windows in filter_completion(windows_completion()).values())
def get_windows_version_choices(): # type: () -> t.List[str]
"""Return a list of supported Windows versions."""
return sorted(windows.version for windows in filter_completion(windows_completion()).values())
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 67,551 |
ansible-test should be able to report a version
|
##### SUMMARY
```
(ansible_base) [vagrant@centos8 ~]$ ansible-test --version
ERROR: The current working directory must be at or below:
- an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/
Current working directory: /home/vagrant
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
ansible-test
|
https://github.com/ansible/ansible/issues/67551
|
https://github.com/ansible/ansible/pull/76866
|
07bcd13e6fdc545e4fcbfd9fd5c0052a063a0df6
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
| 2020-02-18T21:52:37Z |
python
| 2022-01-27T20:32:11Z |
test/lib/ansible_test/_internal/cli/epilog.py
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 67,551 |
ansible-test should be able to report a version
|
##### SUMMARY
```
(ansible_base) [vagrant@centos8 ~]$ ansible-test --version
ERROR: The current working directory must be at or below:
- an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/
Current working directory: /home/vagrant
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
ansible-test
|
https://github.com/ansible/ansible/issues/67551
|
https://github.com/ansible/ansible/pull/76866
|
07bcd13e6fdc545e4fcbfd9fd5c0052a063a0df6
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
| 2020-02-18T21:52:37Z |
python
| 2022-01-27T20:32:11Z |
test/lib/ansible_test/_internal/data.py
|
"""Context information for the current invocation of ansible-test."""
from __future__ import annotations
import dataclasses
import os
import typing as t
from .util import (
ApplicationError,
import_plugins,
is_subdir,
ANSIBLE_LIB_ROOT,
ANSIBLE_TEST_ROOT,
ANSIBLE_SOURCE_ROOT,
display,
cache,
)
from .provider import (
find_path_provider,
get_path_provider_classes,
ProviderNotFoundForPath,
)
from .provider.source import (
SourceProvider,
)
from .provider.source.unversioned import (
UnversionedSource,
)
from .provider.source.installed import (
InstalledSource,
)
from .provider.layout import (
ContentLayout,
LayoutProvider,
)
class DataContext:
"""Data context providing details about the current execution environment for ansible-test."""
def __init__(self):
content_path = os.environ.get('ANSIBLE_TEST_CONTENT_ROOT')
current_path = os.getcwd()
layout_providers = get_path_provider_classes(LayoutProvider)
source_providers = get_path_provider_classes(SourceProvider)
self.__layout_providers = layout_providers
self.__source_providers = source_providers
self.__ansible_source = None # type: t.Optional[t.Tuple[t.Tuple[str, str], ...]]
self.payload_callbacks = [] # type: t.List[t.Callable[[t.List[t.Tuple[str, str]]], None]]
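# Layout resolution order: an explicit ANSIBLE_TEST_CONTENT_ROOT wins, then the
# ansible source tree when the current directory is inside it, otherwise the
# current directory is searched for a recognized content layout.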
if content_path:
content = self.__create_content_layout(layout_providers, source_providers, content_path, False)
elif ANSIBLE_SOURCE_ROOT and is_subdir(current_path, ANSIBLE_SOURCE_ROOT):
content = self.__create_content_layout(layout_providers, source_providers, ANSIBLE_SOURCE_ROOT, False)
else:
content = self.__create_content_layout(layout_providers, source_providers, current_path, True)
self.content = content # type: ContentLayout
def create_collection_layouts(self): # type: () -> t.List[ContentLayout]
"""
Return a list of collection layouts, one for each collection in the same collection root as the current collection layout.
An empty list is returned if the current content layout is not a collection layout.
"""
layout = self.content
collection = layout.collection
if not collection:
return []
root_path = os.path.join(collection.root, 'ansible_collections')
display.info('Scanning collection root: %s' % root_path, verbosity=1)
namespace_names = sorted(name for name in os.listdir(root_path) if os.path.isdir(os.path.join(root_path, name)))
collections = []
for namespace_name in namespace_names:
namespace_path = os.path.join(root_path, namespace_name)
collection_names = sorted(name for name in os.listdir(namespace_path) if os.path.isdir(os.path.join(namespace_path, name)))
for collection_name in collection_names:
collection_path = os.path.join(namespace_path, collection_name)
if collection_path == os.path.join(collection.root, collection.directory):
collection_layout = layout
else:
collection_layout = self.__create_content_layout(self.__layout_providers, self.__source_providers, collection_path, False)
file_count = len(collection_layout.all_files())
if not file_count:
continue
display.info('Including collection: %s (%d files)' % (collection_layout.collection.full_name, file_count), verbosity=1)
collections.append(collection_layout)
return collections
@staticmethod
def __create_content_layout(layout_providers, # type: t.List[t.Type[LayoutProvider]]
source_providers, # type: t.List[t.Type[SourceProvider]]
root, # type: str
walk, # type: bool
): # type: (...) -> ContentLayout
"""Create a content layout using the given providers and root path."""
layout_provider = find_path_provider(LayoutProvider, layout_providers, root, walk)
try:
# Begin the search for the source provider at the layout provider root.
# This intentionally ignores version control within subdirectories of the layout root, a condition which was previously an error.
# Doing so allows support for older git versions for which it is difficult to distinguish between a super project and a sub project.
# It also provides a better user experience, since the solution for the user would effectively be the same -- to remove the nested version control.
source_provider = find_path_provider(SourceProvider, source_providers, layout_provider.root, walk)
except ProviderNotFoundForPath:
source_provider = UnversionedSource(layout_provider.root)
layout = layout_provider.create(layout_provider.root, source_provider.get_paths(layout_provider.root))
return layout
def __create_ansible_source(self):
"""Return a tuple of Ansible source files with both absolute and relative paths."""
if not ANSIBLE_SOURCE_ROOT:
sources = []
source_provider = InstalledSource(ANSIBLE_LIB_ROOT)
sources.extend((os.path.join(source_provider.root, path), os.path.join('lib', 'ansible', path))
for path in source_provider.get_paths(source_provider.root))
source_provider = InstalledSource(ANSIBLE_TEST_ROOT)
sources.extend((os.path.join(source_provider.root, path), os.path.join('test', 'lib', 'ansible_test', path))
for path in source_provider.get_paths(source_provider.root))
return tuple(sources)
if self.content.is_ansible:
return tuple((os.path.join(self.content.root, path), path) for path in self.content.all_files())
try:
source_provider = find_path_provider(SourceProvider, self.__source_providers, ANSIBLE_SOURCE_ROOT, False)
except ProviderNotFoundForPath:
source_provider = UnversionedSource(ANSIBLE_SOURCE_ROOT)
return tuple((os.path.join(source_provider.root, path), path) for path in source_provider.get_paths(source_provider.root))
@property
def ansible_source(self): # type: () -> t.Tuple[t.Tuple[str, str], ...]
"""Return a tuple of Ansible source files with both absolute and relative paths."""
if not self.__ansible_source:
self.__ansible_source = self.__create_ansible_source()
return self.__ansible_source
def register_payload_callback(self, callback): # type: (t.Callable[[t.List[t.Tuple[str, str]]], None]) -> None
"""Register the given payload callback."""
self.payload_callbacks.append(callback)
@cache
def data_context(): # type: () -> DataContext
"""Initialize provider plugins."""
provider_types = (
'layout',
'source',
)
for provider_type in provider_types:
import_plugins('provider/%s' % provider_type)
try:
context = DataContext()
except ProviderNotFoundForPath:
options = [
' - an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/',
]
if ANSIBLE_SOURCE_ROOT:
options.insert(0, ' - the Ansible source: %s/' % ANSIBLE_SOURCE_ROOT)
raise ApplicationError('''The current working directory must be at or below:
%s
Current working directory: %s''' % ('\n'.join(options), os.getcwd()))
return context
@dataclasses.dataclass(frozen=True)
class PluginInfo:
"""Information about an Ansible plugin."""
plugin_type: str
name: str
paths: t.List[str]
@cache
def content_plugins():
"""
Analyze content.
The primary purpose of this analysis is to facilitate mapping of integration tests to the plugin(s) they are intended to test.
"""
plugins = {} # type: t.Dict[str, t.Dict[str, PluginInfo]]
for plugin_type, plugin_directory in data_context().content.plugin_paths.items():
plugin_paths = sorted(data_context().content.walk_files(plugin_directory))
plugin_directory_offset = len(plugin_directory.split(os.path.sep))
plugin_files = {}
for plugin_path in plugin_paths:
plugin_filename = os.path.basename(plugin_path)
plugin_parts = plugin_path.split(os.path.sep)[plugin_directory_offset:-1]
if plugin_filename == '__init__.py':
if plugin_type != 'module_utils':
continue
else:
plugin_name = os.path.splitext(plugin_filename)[0]
if data_context().content.is_ansible and plugin_type == 'modules':
plugin_name = plugin_name.lstrip('_')
plugin_parts.append(plugin_name)
plugin_name = '.'.join(plugin_parts)
plugin_files.setdefault(plugin_name, []).append(plugin_filename)
plugins[plugin_type] = {plugin_name: PluginInfo(
plugin_type=plugin_type,
name=plugin_name,
paths=paths,
) for plugin_name, paths in plugin_files.items()}
return plugins
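# Illustrative shape of the result (hypothetical plugin): for a collection with
# plugins/modules/ping.py, content_plugins() would include
#   plugins['modules']['ping'] == PluginInfo(plugin_type='modules', name='ping', paths=['ping.py'])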
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 67,551 |
ansible-test should be able to report a version
|
##### SUMMARY
```
(ansible_base) [vagrant@centos8 ~]$ ansible-test --version
ERROR: The current working directory must be at or below:
- an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/
Current working directory: /home/vagrant
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
ansible-test
|
https://github.com/ansible/ansible/issues/67551
|
https://github.com/ansible/ansible/pull/76866
|
07bcd13e6fdc545e4fcbfd9fd5c0052a063a0df6
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
| 2020-02-18T21:52:37Z |
python
| 2022-01-27T20:32:11Z |
test/lib/ansible_test/_internal/provider/layout/__init__.py
|
"""Code for finding content."""
from __future__ import annotations
import abc
import collections
import os
import typing as t
from ...util import (
ANSIBLE_SOURCE_ROOT,
)
from .. import (
PathProvider,
)
class Layout:
"""Description of content locations and helper methods to access content."""
def __init__(self,
root, # type: str
paths, # type: t.List[str]
): # type: (...) -> None
self.root = root
self.__paths = paths # contains both file paths and symlinked directory paths (ending with os.path.sep)
self.__files = [path for path in paths if not path.endswith(os.path.sep)] # contains only file paths
self.__paths_tree = paths_to_tree(self.__paths)
self.__files_tree = paths_to_tree(self.__files)
def all_files(self, include_symlinked_directories=False): # type: (bool) -> t.List[str]
"""Return a list of all file paths."""
if include_symlinked_directories:
return self.__paths
return self.__files
def walk_files(self, directory, include_symlinked_directories=False): # type: (str, bool) -> t.List[str]
"""Return a list of file paths found recursively under the given directory."""
if include_symlinked_directories:
tree = self.__paths_tree
else:
tree = self.__files_tree
parts = directory.rstrip(os.path.sep).split(os.path.sep)
item = get_tree_item(tree, parts)
if not item:
return []
directories = collections.deque(item[0].values())
files = list(item[1])
while directories:
item = directories.pop()
directories.extend(item[0].values())
files.extend(item[1])
return files
def get_dirs(self, directory): # type: (str) -> t.List[str]
"""Return a list directory paths found directly under the given directory."""
parts = directory.rstrip(os.path.sep).split(os.path.sep)
item = get_tree_item(self.__files_tree, parts)
return [os.path.join(directory, key) for key in item[0].keys()] if item else []
def get_files(self, directory): # type: (str) -> t.List[str]
"""Return a list of file paths found directly under the given directory."""
parts = directory.rstrip(os.path.sep).split(os.path.sep)
item = get_tree_item(self.__files_tree, parts)
return item[1] if item else []
class ContentLayout(Layout):
"""Information about the current Ansible content being tested."""
def __init__(self,
root, # type: str
paths, # type: t.List[str]
plugin_paths, # type: t.Dict[str, str]
collection, # type: t.Optional[CollectionDetail]
test_path, # type: str
results_path, # type: str
sanity_path, # type: str
sanity_messages, # type: t.Optional[LayoutMessages]
integration_path, # type: str
integration_targets_path, # type: str
integration_vars_path, # type: str
integration_messages, # type: t.Optional[LayoutMessages]
unit_path, # type: str
unit_module_path, # type: str
unit_module_utils_path, # type: str
unit_messages, # type: t.Optional[LayoutMessages]
): # type: (...) -> None
super().__init__(root, paths)
self.plugin_paths = plugin_paths
self.collection = collection
self.test_path = test_path
self.results_path = results_path
self.sanity_path = sanity_path
self.sanity_messages = sanity_messages
self.integration_path = integration_path
self.integration_targets_path = integration_targets_path
self.integration_vars_path = integration_vars_path
self.integration_messages = integration_messages
self.unit_path = unit_path
self.unit_module_path = unit_module_path
self.unit_module_utils_path = unit_module_utils_path
self.unit_messages = unit_messages
self.is_ansible = root == ANSIBLE_SOURCE_ROOT
@property
def prefix(self): # type: () -> str
"""Return the collection prefix or an empty string if not a collection."""
if self.collection:
return self.collection.prefix
return ''
@property
def module_path(self): # type: () -> t.Optional[str]
"""Return the path where modules are found, if any."""
return self.plugin_paths.get('modules')
@property
def module_utils_path(self): # type: () -> t.Optional[str]
"""Return the path where module_utils are found, if any."""
return self.plugin_paths.get('module_utils')
@property
def module_utils_powershell_path(self): # type: () -> t.Optional[str]
"""Return the path where powershell module_utils are found, if any."""
if self.is_ansible:
return os.path.join(self.plugin_paths['module_utils'], 'powershell')
return self.plugin_paths.get('module_utils')
@property
def module_utils_csharp_path(self): # type: () -> t.Optional[str]
"""Return the path where csharp module_utils are found, if any."""
if self.is_ansible:
return os.path.join(self.plugin_paths['module_utils'], 'csharp')
return self.plugin_paths.get('module_utils')
class LayoutMessages:
"""Messages generated during layout creation that should be deferred for later display."""
def __init__(self):
self.info = [] # type: t.List[str]
self.warning = [] # type: t.List[str]
self.error = [] # type: t.List[str]
class CollectionDetail:
"""Details about the layout of the current collection."""
def __init__(self,
name, # type: str
namespace, # type: str
root, # type: str
): # type: (...) -> None
self.name = name
self.namespace = namespace
self.root = root
self.full_name = '%s.%s' % (namespace, name)
self.prefix = '%s.' % self.full_name
self.directory = os.path.join('ansible_collections', namespace, name)
class LayoutProvider(PathProvider):
"""Base class for layout providers."""
PLUGIN_TYPES = (
'action',
'become',
'cache',
'callback',
'cliconf',
'connection',
'doc_fragments',
'filter',
'httpapi',
'inventory',
'lookup',
'module_utils',
'modules',
'netconf',
'shell',
'strategy',
'terminal',
'test',
'vars',
# The following are plugin directories not directly supported by ansible-core, but used in collections
# (https://github.com/ansible-collections/overview/blob/main/collection_requirements.rst#modules--plugins)
'plugin_utils',
'sub_plugins',
)
@abc.abstractmethod
def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout
"""Create a layout using the given root and paths."""
def paths_to_tree(paths): # type: (t.List[str]) -> t.Tuple[t.Dict[str, t.Any], t.List[str]]
"""Return a filesystem tree from the given list of paths."""
tree = {}, []
for path in paths:
parts = path.split(os.path.sep)
root = tree
for part in parts[:-1]:
if part not in root[0]:
root[0][part] = {}, []
root = root[0][part]
root[1].append(path)
return tree
def get_tree_item(tree, parts): # type: (t.Tuple[t.Dict[str, t.Any], t.List[str]], t.List[str]) -> t.Optional[t.Tuple[t.Dict[str, t.Any], t.List[str]]]
"""Return the portion of the tree found under the path given by parts, or None if it does not exist."""
root = tree
for part in parts:
root = root[0].get(part)
if not root:
return None
return root
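# Worked example (illustrative): given paths ['a/b/c.py', 'a/d.py'],
# paths_to_tree() returns
#   ({'a': ({'b': ({}, ['a/b/c.py'])}, ['a/d.py'])}, [])
# and get_tree_item(tree, ['a', 'b']) returns ({}, ['a/b/c.py']).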
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 67,551 |
ansible-test should be able to report a version
|
##### SUMMARY
```
(ansible_base) [vagrant@centos8 ~]$ ansible-test --version
ERROR: The current working directory must be at or below:
- an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/
Current working directory: /home/vagrant
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
ansible-test
|
https://github.com/ansible/ansible/issues/67551
|
https://github.com/ansible/ansible/pull/76866
|
07bcd13e6fdc545e4fcbfd9fd5c0052a063a0df6
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
| 2020-02-18T21:52:37Z |
python
| 2022-01-27T20:32:11Z |
test/lib/ansible_test/_internal/provider/layout/unsupported.py
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 67,551 |
ansible-test should be able to report a version
|
##### SUMMARY
```
(ansible_base) [vagrant@centos8 ~]$ ansible-test --version
ERROR: The current working directory must be at or below:
- an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/
Current working directory: /home/vagrant
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
ansible-test
|
https://github.com/ansible/ansible/issues/67551
|
https://github.com/ansible/ansible/pull/76866
|
07bcd13e6fdc545e4fcbfd9fd5c0052a063a0df6
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
| 2020-02-18T21:52:37Z |
python
| 2022-01-27T20:32:11Z |
test/lib/ansible_test/_internal/provider/source/unsupported.py
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 64,523 |
ansible-test --help reports an error if cwd is not a collection path
|
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and devel branch are affected too -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
The `ansible-test --help` command will fail with an error if you are not inside a collection path.
It's a bit hard as an introduction :) We should be able to get help from anywhere.
##### ISSUE TYPE
Feature Idea
##### COMPONENT NAME
ansible-test
##### ANSIBLE VERSION
```
ansible 2.9.0
config file = None
configured module search path = ['/Users/remi.rey/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/remi.rey/.virtualenvs/ansible/lib/python3.7/site-packages/ansible
executable location = /Users/remi.rey/.virtualenvs/ansible/bin/ansible
python version = 3.7.4 (default, Sep 7 2019, 18:27:02) [Clang 10.0.1 (clang-1001.0.46.4)]
```
##### CONFIGURATION
```paste below
N/A
```
##### OS / ENVIRONMENT
<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->
N/A
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
<!--- Paste example playbooks or commands between quotes below -->
```
$ pip install ansible==2.9
$ ansible-test --help
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- Describe what you expected to happen when running the steps above -->
```
$ ansible-test --help
usage: ansible-test [-h] COMMAND ...
positional arguments:
COMMAND
integration posix integration tests
network-integration
network integration tests
windows-integration
windows integration tests
units unit tests
sanity sanity tests
shell open an interactive shell
coverage code coverage management and reporting
env show information about the test environment
optional arguments:
-h, --help show this help message and exit
Install the "argcomplete" python package to enable tab completion.
```
##### ACTUAL RESULTS
<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
<!--- Paste verbatim command output between quotes -->
```
$ ansible-test --help
ERROR: The current working directory must be at or below:
- an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/
Current working directory: /Users/remi.rey
```
|
https://github.com/ansible/ansible/issues/64523
|
https://github.com/ansible/ansible/pull/76866
|
07bcd13e6fdc545e4fcbfd9fd5c0052a063a0df6
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
| 2019-11-06T16:33:23Z |
python
| 2022-01-27T20:32:11Z |
changelogs/fragments/ansible-test-help-cwd.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 64,523 |
ansible-test --help reports an error if cwd is not a collection path
|
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and devel branch are affected too -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
The `ansible-test --help` command will fail with an error if you are not inside a collection path.
It's a bit hard as an introduction :) We should be able to get help from anywhere.
##### ISSUE TYPE
Feature Idea
##### COMPONENT NAME
ansible-test
##### ANSIBLE VERSION
```
ansible 2.9.0
config file = None
configured module search path = ['/Users/remi.rey/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/remi.rey/.virtualenvs/ansible/lib/python3.7/site-packages/ansible
executable location = /Users/remi.rey/.virtualenvs/ansible/bin/ansible
python version = 3.7.4 (default, Sep 7 2019, 18:27:02) [Clang 10.0.1 (clang-1001.0.46.4)]
```
##### CONFIGURATION
```paste below
N/A
```
##### OS / ENVIRONMENT
<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->
N/A
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
<!--- Paste example playbooks or commands between quotes below -->
```
$ pip install ansible==2.9
$ ansible-test --help
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- Describe what you expected to happen when running the steps above -->
```
$ ansible-test --help
usage: ansible-test [-h] COMMAND ...
positional arguments:
COMMAND
integration posix integration tests
network-integration
network integration tests
windows-integration
windows integration tests
units unit tests
sanity sanity tests
shell open an interactive shell
coverage code coverage management and reporting
env show information about the test environment
optional arguments:
-h, --help show this help message and exit
Install the "argcomplete" python package to enable tab completion.
```
##### ACTUAL RESULTS
<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
<!--- Paste verbatim command output between quotes -->
```
$ ansible-test --help
ERROR: The current working directory must be at or below:
- an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/
Current working directory: /Users/remi.rey
```
|
https://github.com/ansible/ansible/issues/64523
|
https://github.com/ansible/ansible/pull/76866
|
07bcd13e6fdc545e4fcbfd9fd5c0052a063a0df6
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
| 2019-11-06T16:33:23Z |
python
| 2022-01-27T20:32:11Z |
test/integration/targets/ansible-test/collection-tests/unsupported-directory.sh
|
#!/usr/bin/env bash
set -eux -o pipefail
cd "${WORK_DIR}"
if ansible-test --help 1>stdout 2>stderr; then
echo "ansible-test did not fail"
exit 1
fi
grep '^Current working directory: ' stderr
if grep raise stderr; then
echo "ansible-test failed with a traceback instead of an error message"
exit 2
fi
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 64,523 |
ansible-test --help reports an error if cwd is not a collection path
|
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and devel branch are affected too -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
The `ansible-test --help` command will fail with an error if you are not inside a collection path.
It's a bit hard as an introduction :) We should be able to get help from anywhere.
##### ISSUE TYPE
Feature Idea
##### COMPONENT NAME
ansible-test
##### ANSIBLE VERSION
```
ansible 2.9.0
config file = None
configured module search path = ['/Users/remi.rey/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/remi.rey/.virtualenvs/ansible/lib/python3.7/site-packages/ansible
executable location = /Users/remi.rey/.virtualenvs/ansible/bin/ansible
python version = 3.7.4 (default, Sep 7 2019, 18:27:02) [Clang 10.0.1 (clang-1001.0.46.4)]
```
##### CONFIGURATION
```paste below
N/A
```
##### OS / ENVIRONMENT
<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->
N/A
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
<!--- Paste example playbooks or commands between quotes below -->
```
$ pip install ansible==2.9
$ ansible-test --help
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- Describe what you expected to happen when running the steps above -->
```
$ ansible-test --help
usage: ansible-test [-h] COMMAND ...
positional arguments:
COMMAND
integration posix integration tests
network-integration
network integration tests
windows-integration
windows integration tests
units unit tests
sanity sanity tests
shell open an interactive shell
coverage code coverage management and reporting
env show information about the test environment
optional arguments:
-h, --help show this help message and exit
Install the "argcomplete" python package to enable tab completion.
```
##### ACTUAL RESULTS
<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
<!--- Paste verbatim command output between quotes -->
```
$ ansible-test --help
ERROR: The current working directory must be at or below:
- an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/
Current working directory: /Users/remi.rey
```
|
https://github.com/ansible/ansible/issues/64523
|
https://github.com/ansible/ansible/pull/76866
|
07bcd13e6fdc545e4fcbfd9fd5c0052a063a0df6
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
| 2019-11-06T16:33:23Z |
python
| 2022-01-27T20:32:11Z |
test/lib/ansible_test/_internal/__init__.py
|
"""Test runner for all Ansible tests."""
from __future__ import annotations
import os
import sys
import typing as t
# This import should occur as early as possible.
# It must occur before subprocess has been imported anywhere in the current process.
from .init import (
CURRENT_RLIMIT_NOFILE,
)
from .util import (
ApplicationError,
display,
MAXFD,
)
from .delegation import (
delegate,
)
from .executor import (
ApplicationWarning,
Delegate,
ListTargets,
)
from .timeout import (
configure_timeout,
)
from .data import (
data_context,
)
from .util_common import (
CommonConfig,
)
from .cli import (
parse_args,
)
from .provisioning import (
PrimeContainers,
)
def main(cli_args=None): # type: (t.Optional[t.List[str]]) -> None
"""Main program function."""
try:
os.chdir(data_context().content.root)
args = parse_args(cli_args)
config = args.config(args) # type: CommonConfig
display.verbosity = config.verbosity
display.truncate = config.truncate
display.redact = config.redact
display.color = config.color
display.info_stderr = config.info_stderr
configure_timeout(config)
display.info('RLIMIT_NOFILE: %s' % (CURRENT_RLIMIT_NOFILE,), verbosity=2)
display.info('MAXFD: %d' % MAXFD, verbosity=2)
delegate_args = None
target_names = None
try:
args.func(config)
except PrimeContainers:
pass
except ListTargets as ex:
# save target_names for use once we exit the exception handler
target_names = ex.target_names
except Delegate as ex:
# save delegation args for use once we exit the exception handler
delegate_args = (ex.host_state, ex.exclude, ex.require)
if delegate_args:
# noinspection PyTypeChecker
delegate(config, *delegate_args)
if target_names:
for target_name in target_names:
print(target_name) # info goes to stderr, this should be on stdout
display.review_warnings()
config.success = True
except ApplicationWarning as ex:
display.warning(u'%s' % ex)
sys.exit(0)
except ApplicationError as ex:
display.error(u'%s' % ex)
sys.exit(1)
except KeyboardInterrupt:
sys.exit(2)
except BrokenPipeError:
sys.exit(3)
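# Editor's note (hedged, not in the original source): the handlers above define
# the exit-code convention -- 0 for success or an ApplicationWarning, 1 for an
# ApplicationError, 2 for KeyboardInterrupt, 3 for BrokenPipeError. A minimal
# hypothetical entry point would be:
#
#   if __name__ == '__main__':
#       main()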
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 64,523 |
ansible-test --help reports an error if cwd is not a collection path
|
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and devel branch are affected too -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
The `ansible-test --help` command will fail with an error if you are not inside a collection path.
It's a bit hard as an introduction :) We should be able to get help from anywhere.
##### ISSUE TYPE
Feature Idea
##### COMPONENT NAME
ansible-test
##### ANSIBLE VERSION
```
ansible 2.9.0
config file = None
configured module search path = ['/Users/remi.rey/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/remi.rey/.virtualenvs/ansible/lib/python3.7/site-packages/ansible
executable location = /Users/remi.rey/.virtualenvs/ansible/bin/ansible
python version = 3.7.4 (default, Sep 7 2019, 18:27:02) [Clang 10.0.1 (clang-1001.0.46.4)]
```
##### CONFIGURATION
```paste below
N/A
```
##### OS / ENVIRONMENT
<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->
N/A
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
<!--- Paste example playbooks or commands between quotes below -->
```
$ pip install ansible==2.9
$ ansible-test --help
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- Describe what you expected to happen when running the steps above -->
```
$ ansible-test --help
usage: ansible-test [-h] COMMAND ...
positional arguments:
COMMAND
integration posix integration tests
network-integration
network integration tests
windows-integration
windows integration tests
units unit tests
sanity sanity tests
shell open an interactive shell
coverage code coverage management and reporting
env show information about the test environment
optional arguments:
-h, --help show this help message and exit
Install the "argcomplete" python package to enable tab completion.
```
##### ACTUAL RESULTS
<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
<!--- Paste verbatim command output between quotes -->
```
$ ansible-test --help
ERROR: The current working directory must be at or below:
- an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/
Current working directory: /Users/remi.rey
```
|
https://github.com/ansible/ansible/issues/64523
|
https://github.com/ansible/ansible/pull/76866
|
07bcd13e6fdc545e4fcbfd9fd5c0052a063a0df6
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
| 2019-11-06T16:33:23Z |
python
| 2022-01-27T20:32:11Z |
test/lib/ansible_test/_internal/cli/__init__.py
|
"""Command line parsing."""
from __future__ import annotations
import argparse
import os
import sys
import typing as t
from .argparsing import (
CompositeActionCompletionFinder,
)
from .commands import (
do_commands,
)
from .compat import (
HostSettings,
convert_legacy_args,
)
def parse_args(argv=None): # type: (t.Optional[t.List[str]]) -> argparse.Namespace
"""Parse command line arguments."""
completer = CompositeActionCompletionFinder()
if completer.enabled:
epilog = 'Tab completion available using the "argcomplete" python package.'
else:
epilog = 'Install the "argcomplete" python package to enable tab completion.'
parser = argparse.ArgumentParser(prog='ansible-test', epilog=epilog)
do_commands(parser, completer)
completer(
parser,
always_complete_options=False,
)
if argv is None:
argv = sys.argv[1:]
else:
argv = argv[1:]
args = parser.parse_args(argv)
if args.explain and not args.verbosity:
args.verbosity = 1
if args.no_environment:
pass
elif args.host_path:
args.host_settings = HostSettings.deserialize(os.path.join(args.host_path, 'settings.dat'))
else:
args.host_settings = convert_legacy_args(argv, args, args.target_mode)
args.host_settings.apply_defaults()
return args
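# Editor's sketch (assumption, not part of the original source): because
# parse_args() slices argv[1:] itself, an explicit argv must mirror sys.argv
# and include a program name at index 0, e.g.:
#
#   args = parse_args(['ansible-test', 'sanity'])
#   config = args.config(args)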
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 64,523 |
ansible-test --help reports an error if cwd is not a collection path
|
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and devel branch are affected too -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
The `ansible-test --help` command will fail with an error if you are not inside a collection path.
It's a bit hard as an introduction :) We should be able to get help from anywhere.
##### ISSUE TYPE
Feature Idea
##### COMPONENT NAME
ansible-test
##### ANSIBLE VERSION
```
ansible 2.9.0
config file = None
configured module search path = ['/Users/remi.rey/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/remi.rey/.virtualenvs/ansible/lib/python3.7/site-packages/ansible
executable location = /Users/remi.rey/.virtualenvs/ansible/bin/ansible
python version = 3.7.4 (default, Sep 7 2019, 18:27:02) [Clang 10.0.1 (clang-1001.0.46.4)]
```
##### CONFIGURATION
```paste below
N/A
```
##### OS / ENVIRONMENT
<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->
N/A
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
<!--- Paste example playbooks or commands between quotes below -->
```
$ pip install ansible==2.9
$ ansible-test --help
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- Describe what you expected to happen when running the steps above -->
```
$ ansible-test --help
usage: ansible-test [-h] COMMAND ...
positional arguments:
COMMAND
integration posix integration tests
network-integration
network integration tests
windows-integration
windows integration tests
units unit tests
sanity sanity tests
shell open an interactive shell
coverage code coverage management and reporting
env show information about the test environment
optional arguments:
-h, --help show this help message and exit
Install the "argcomplete" python package to enable tab completion.
```
##### ACTUAL RESULTS
<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
<!--- Paste verbatim command output between quotes -->
```
$ ansible-test --help
ERROR: The current working directory must be at or below:
- an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/
Current working directory: /Users/remi.rey
```
|
https://github.com/ansible/ansible/issues/64523
|
https://github.com/ansible/ansible/pull/76866
|
07bcd13e6fdc545e4fcbfd9fd5c0052a063a0df6
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
| 2019-11-06T16:33:23Z |
python
| 2022-01-27T20:32:11Z |
test/lib/ansible_test/_internal/cli/environments.py
|
"""Command line parsing for test environments."""
from __future__ import annotations
import argparse
import enum
import functools
import typing as t
from ..constants import (
CONTROLLER_PYTHON_VERSIONS,
REMOTE_PROVIDERS,
SECCOMP_CHOICES,
SUPPORTED_PYTHON_VERSIONS,
)
from ..completion import (
docker_completion,
network_completion,
remote_completion,
windows_completion,
filter_completion,
)
from ..cli.argparsing import (
CompositeAction,
CompositeActionCompletionFinder,
)
from ..cli.argparsing.actions import (
EnumAction,
)
from ..cli.actions import (
DelegatedControllerAction,
NetworkSshTargetAction,
NetworkTargetAction,
OriginControllerAction,
PosixSshTargetAction,
PosixTargetAction,
SanityPythonTargetAction,
UnitsPythonTargetAction,
WindowsSshTargetAction,
WindowsTargetAction,
)
from ..cli.compat import (
TargetMode,
)
from ..config import (
TerminateMode,
)
from .completers import (
complete_choices,
)
from .converters import (
key_value_type,
)
from ..ci import (
get_ci_provider,
)
class ControllerMode(enum.Enum):
"""Type of provisioning to use for the controller."""
NO_DELEGATION = enum.auto()
ORIGIN = enum.auto()
DELEGATED = enum.auto()
def add_environments(
parser, # type: argparse.ArgumentParser
completer, # type: CompositeActionCompletionFinder
controller_mode, # type: ControllerMode
target_mode, # type: TargetMode
): # type: (...) -> None
"""Add arguments for the environments used to run ansible-test and commands it invokes."""
no_environment = controller_mode == ControllerMode.NO_DELEGATION and target_mode == TargetMode.NO_TARGETS
parser.set_defaults(no_environment=no_environment)
if no_environment:
return
parser.set_defaults(target_mode=target_mode)
add_global_options(parser, controller_mode)
add_legacy_environment_options(parser, controller_mode, target_mode)
action_types = add_composite_environment_options(parser, completer, controller_mode, target_mode)
sections = [f'{heading}\n{content}'
for action_type, documentation_state in CompositeAction.documentation_state.items() if action_type in action_types
for heading, content in documentation_state.sections.items()]
if not get_ci_provider().supports_core_ci_auth():
sections.append('Remote provisioning options have been hidden since no Ansible Core CI API key was found.')
parser.formatter_class = argparse.RawDescriptionHelpFormatter
parser.epilog = '\n\n'.join(sections)
def add_global_options(
parser, # type: argparse.ArgumentParser
controller_mode, # type: ControllerMode
):
"""Add global options for controlling the test environment that work with both the legacy and composite options."""
global_parser = t.cast(argparse.ArgumentParser, parser.add_argument_group(title='global environment arguments'))
global_parser.add_argument(
'--containers',
metavar='JSON',
help=argparse.SUPPRESS,
)
global_parser.add_argument(
'--pypi-proxy',
action='store_true',
help=argparse.SUPPRESS,
)
global_parser.add_argument(
'--pypi-endpoint',
metavar='URI',
help=argparse.SUPPRESS,
)
global_parser.add_argument(
'--requirements',
action='store_true',
default=False,
help='install command requirements',
)
global_parser.add_argument(
'--no-pip-check',
action='store_true',
help=argparse.SUPPRESS, # deprecated, kept for now (with a warning) for backwards compatibility
)
add_global_remote(global_parser, controller_mode)
add_global_docker(global_parser, controller_mode)
def add_composite_environment_options(
parser, # type: argparse.ArgumentParser
completer, # type: CompositeActionCompletionFinder
controller_mode, # type: ControllerMode
target_mode, # type: TargetMode
): # type: (...) -> t.List[t.Type[CompositeAction]]
"""Add composite options for controlling the test environment."""
composite_parser = t.cast(argparse.ArgumentParser, parser.add_argument_group(
title='composite environment arguments (mutually exclusive with "environment arguments" above)'))
composite_parser.add_argument(
'--host-path',
help=argparse.SUPPRESS,
)
action_types = [] # type: t.List[t.Type[CompositeAction]]
def register_action_type(action_type): # type: (t.Type[CompositeAction]) -> t.Type[CompositeAction]
"""Register the provided composite action type and return it."""
action_types.append(action_type)
return action_type
if controller_mode == ControllerMode.NO_DELEGATION:
composite_parser.set_defaults(controller=None)
else:
composite_parser.add_argument(
'--controller',
metavar='OPT',
action=register_action_type(DelegatedControllerAction if controller_mode == ControllerMode.DELEGATED else OriginControllerAction),
help='configuration for the controller',
).completer = completer.completer
if target_mode == TargetMode.NO_TARGETS:
composite_parser.set_defaults(targets=[])
elif target_mode == TargetMode.SHELL:
group = composite_parser.add_mutually_exclusive_group()
group.add_argument(
'--target-posix',
metavar='OPT',
action=register_action_type(PosixSshTargetAction),
help='configuration for the target',
).completer = completer.completer
suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS
group.add_argument(
'--target-windows',
metavar='OPT',
action=WindowsSshTargetAction if suppress else register_action_type(WindowsSshTargetAction),
help=suppress or 'configuration for the target',
).completer = completer.completer
group.add_argument(
'--target-network',
metavar='OPT',
action=NetworkSshTargetAction if suppress else register_action_type(NetworkSshTargetAction),
help=suppress or 'configuration for the target',
).completer = completer.completer
else:
if target_mode.multiple_pythons:
target_option = '--target-python'
target_help = 'configuration for the target python interpreter(s)'
elif target_mode == TargetMode.POSIX_INTEGRATION:
target_option = '--target'
target_help = 'configuration for the target'
else:
target_option = '--target'
target_help = 'configuration for the target(s)'
target_actions = {
TargetMode.POSIX_INTEGRATION: PosixTargetAction,
TargetMode.WINDOWS_INTEGRATION: WindowsTargetAction,
TargetMode.NETWORK_INTEGRATION: NetworkTargetAction,
TargetMode.SANITY: SanityPythonTargetAction,
TargetMode.UNITS: UnitsPythonTargetAction,
}
target_action = target_actions[target_mode]
composite_parser.add_argument(
target_option,
metavar='OPT',
action=register_action_type(target_action),
help=target_help,
).completer = completer.completer
return action_types
def add_legacy_environment_options(
parser, # type: argparse.ArgumentParser
controller_mode, # type: ControllerMode
target_mode, # type: TargetMode
):
"""Add legacy options for controlling the test environment."""
# noinspection PyTypeChecker
environment = parser.add_argument_group(
title='environment arguments (mutually exclusive with "composite environment arguments" below)') # type: argparse.ArgumentParser
add_environments_python(environment, target_mode)
add_environments_host(environment, controller_mode, target_mode)
def add_environments_python(
environments_parser, # type: argparse.ArgumentParser
target_mode, # type: TargetMode
): # type: (...) -> None
"""Add environment arguments to control the Python version(s) used."""
if target_mode.has_python:
python_versions = SUPPORTED_PYTHON_VERSIONS
else:
python_versions = CONTROLLER_PYTHON_VERSIONS
environments_parser.add_argument(
'--python',
metavar='X.Y',
choices=python_versions + ('default',),
help='python version: %s' % ', '.join(python_versions),
)
environments_parser.add_argument(
'--python-interpreter',
metavar='PATH',
help='path to the python interpreter',
)
def add_environments_host(
environments_parser, # type: argparse.ArgumentParser
controller_mode, # type: ControllerMode
target_mode # type: TargetMode
): # type: (...) -> None
"""Add environment arguments for the given host and argument modes."""
# noinspection PyTypeChecker
environments_exclusive_group = environments_parser.add_mutually_exclusive_group() # type: argparse.ArgumentParser
add_environment_local(environments_exclusive_group)
add_environment_venv(environments_exclusive_group, environments_parser)
if controller_mode == ControllerMode.DELEGATED:
add_environment_remote(environments_exclusive_group, environments_parser, target_mode)
add_environment_docker(environments_exclusive_group, environments_parser, target_mode)
if target_mode == TargetMode.WINDOWS_INTEGRATION:
add_environment_windows(environments_parser)
if target_mode == TargetMode.NETWORK_INTEGRATION:
add_environment_network(environments_parser)
def add_environment_network(
environments_parser, # type: argparse.ArgumentParser
): # type: (...) -> None
"""Add environment arguments for running on a windows host."""
environments_parser.add_argument(
'--platform',
metavar='PLATFORM',
action='append',
help='network platform/version',
).completer = complete_network_platform
environments_parser.add_argument(
'--platform-collection',
type=key_value_type,
metavar='PLATFORM=COLLECTION',
action='append',
help='collection used to test platform',
).completer = complete_network_platform_collection
environments_parser.add_argument(
'--platform-connection',
type=key_value_type,
metavar='PLATFORM=CONNECTION',
action='append',
help='connection used to test platform',
).completer = complete_network_platform_connection
environments_parser.add_argument(
'--inventory',
metavar='PATH',
help='path to inventory used for tests',
)
def add_environment_windows(
environments_parser, # type: argparse.ArgumentParser
): # type: (...) -> None
"""Add environment arguments for running on a windows host."""
environments_parser.add_argument(
'--windows',
metavar='VERSION',
action='append',
help='windows version',
).completer = complete_windows
environments_parser.add_argument(
'--inventory',
metavar='PATH',
help='path to inventory used for tests',
)
def add_environment_local(
exclusive_parser, # type: argparse.ArgumentParser
): # type: (...) -> None
"""Add environment arguments for running on the local (origin) host."""
exclusive_parser.add_argument(
'--local',
action='store_true',
help='run from the local environment',
)
def add_environment_venv(
exclusive_parser, # type: argparse.ArgumentParser
environments_parser, # type: argparse.ArgumentParser
): # type: (...) -> None
"""Add environment arguments for running in ansible-test managed virtual environments."""
exclusive_parser.add_argument(
'--venv',
action='store_true',
help='run from a virtual environment',
)
environments_parser.add_argument(
'--venv-system-site-packages',
action='store_true',
help='enable system site packages')
def add_global_docker(
parser, # type: argparse.ArgumentParser
controller_mode, # type: ControllerMode
): # type: (...) -> None
"""Add global options for Docker."""
if controller_mode != ControllerMode.DELEGATED:
parser.set_defaults(
docker_no_pull=False,
docker_network=None,
docker_terminate=None,
prime_containers=False,
)
return
parser.add_argument(
'--docker-no-pull',
action='store_true',
help=argparse.SUPPRESS, # deprecated, kept for now (with a warning) for backwards compatibility
)
parser.add_argument(
'--docker-network',
metavar='NET',
help='run using the specified network',
)
parser.add_argument(
'--docker-terminate',
metavar='T',
default=TerminateMode.ALWAYS,
type=TerminateMode,
action=EnumAction,
help='terminate the container: %(choices)s (default: %(default)s)',
)
parser.add_argument(
'--prime-containers',
action='store_true',
help='download containers without running tests',
)
def add_environment_docker(
exclusive_parser, # type: argparse.ArgumentParser
environments_parser, # type: argparse.ArgumentParser
target_mode, # type: TargetMode
): # type: (...) -> None
"""Add environment arguments for running in docker containers."""
if target_mode in (TargetMode.POSIX_INTEGRATION, TargetMode.SHELL):
docker_images = sorted(filter_completion(docker_completion()))
else:
docker_images = sorted(filter_completion(docker_completion(), controller_only=True))
exclusive_parser.add_argument(
'--docker',
metavar='IMAGE',
nargs='?',
const='default',
help='run from a docker container',
).completer = functools.partial(complete_choices, docker_images)
environments_parser.add_argument(
'--docker-privileged',
action='store_true',
help='run docker container in privileged mode',
)
environments_parser.add_argument(
'--docker-seccomp',
metavar='SC',
choices=SECCOMP_CHOICES,
help='set seccomp confinement for the test container: %(choices)s',
)
environments_parser.add_argument(
'--docker-memory',
metavar='INT',
type=int,
help='memory limit for docker in bytes',
)
def add_global_remote(
parser, # type: argparse.ArgumentParser
controller_mode, # type: ControllerMode
): # type: (...) -> None
"""Add global options for remote instances."""
if controller_mode != ControllerMode.DELEGATED:
parser.set_defaults(
remote_stage=None,
remote_endpoint=None,
remote_terminate=None,
)
return
suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS
parser.add_argument(
'--remote-stage',
metavar='STAGE',
default='prod',
help=suppress or 'remote stage to use: prod, dev',
).completer = complete_remote_stage
parser.add_argument(
'--remote-endpoint',
metavar='EP',
help=suppress or 'remote provisioning endpoint to use',
)
parser.add_argument(
'--remote-terminate',
metavar='T',
default=TerminateMode.NEVER,
type=TerminateMode,
action=EnumAction,
help=suppress or 'terminate the remote instance: %(choices)s (default: %(default)s)',
)
def add_environment_remote(
exclusive_parser, # type: argparse.ArgumentParser
environments_parser, # type: argparse.ArgumentParser
target_mode, # type: TargetMode
): # type: (...) -> None
"""Add environment arguments for running in ansible-core-ci provisioned remote virtual machines."""
if target_mode == TargetMode.POSIX_INTEGRATION:
remote_platforms = get_remote_platform_choices()
elif target_mode == TargetMode.SHELL:
remote_platforms = sorted(set(get_remote_platform_choices()) | set(get_windows_platform_choices()))
else:
remote_platforms = get_remote_platform_choices(True)
suppress = None if get_ci_provider().supports_core_ci_auth() else argparse.SUPPRESS
exclusive_parser.add_argument(
'--remote',
metavar='NAME',
help=suppress or 'run from a remote instance',
).completer = functools.partial(complete_choices, remote_platforms)
environments_parser.add_argument(
'--remote-provider',
metavar='PR',
choices=REMOTE_PROVIDERS,
help=suppress or 'remote provider to use: %(choices)s',
)
def complete_remote_stage(prefix: str, **_) -> t.List[str]:
"""Return a list of supported stages matching the given prefix."""
return [stage for stage in ('prod', 'dev') if stage.startswith(prefix)]
def complete_windows(prefix: str, parsed_args: argparse.Namespace, **_) -> t.List[str]:
"""Return a list of supported Windows versions matching the given prefix, excluding versions already parsed from the command line."""
return [i for i in get_windows_version_choices() if i.startswith(prefix) and (not parsed_args.windows or i not in parsed_args.windows)]
def complete_network_platform(prefix: str, parsed_args: argparse.Namespace, **_) -> t.List[str]:
"""Return a list of supported network platforms matching the given prefix, excluding platforms already parsed from the command line."""
images = sorted(filter_completion(network_completion()))
return [i for i in images if i.startswith(prefix) and (not parsed_args.platform or i not in parsed_args.platform)]
def complete_network_platform_collection(prefix: str, parsed_args: argparse.Namespace, **_) -> t.List[str]:
"""Return a list of supported network platforms matching the given prefix, excluding collection platforms already parsed from the command line."""
left = prefix.split('=')[0]
images = sorted(set(image.platform for image in filter_completion(network_completion()).values()))
return [i + '=' for i in images if i.startswith(left) and (not parsed_args.platform_collection or i not in [x[0] for x in parsed_args.platform_collection])]
def complete_network_platform_connection(prefix: str, parsed_args: argparse.Namespace, **_) -> t.List[str]:
"""Return a list of supported network platforms matching the given prefix, excluding connection platforms already parsed from the command line."""
left = prefix.split('=')[0]
images = sorted(set(image.platform for image in filter_completion(network_completion()).values()))
return [i + '=' for i in images if i.startswith(left) and (not parsed_args.platform_connection or i not in [x[0] for x in parsed_args.platform_connection])]
def get_remote_platform_choices(controller=False): # type: (bool) -> t.List[str]
"""Return a list of supported remote platforms matching the given prefix."""
return sorted(filter_completion(remote_completion(), controller_only=controller))
def get_windows_platform_choices(): # type: () -> t.List[str]
"""Return a list of supported Windows versions matching the given prefix."""
return sorted(f'windows/{windows.version}' for windows in filter_completion(windows_completion()).values())
def get_windows_version_choices(): # type: () -> t.List[str]
"""Return a list of supported Windows versions."""
return sorted(windows.version for windows in filter_completion(windows_completion()).values())
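# Editor's sketch (not part of the original source): the completers above do
# plain prefix filtering over fixed or discovered choices, e.g.:
#
#   complete_remote_stage('p')   # -> ['prod']
#   complete_remote_stage('d')   # -> ['dev']
#   complete_remote_stage('')    # -> ['prod', 'dev']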
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 64,523 |
ansible-test --help reports an error if cwd is not a collection path
|
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and devel branch are affected too -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
The `ansible-test --help` command will fail with an error if you are not inside a collection path.
It's a bit hard as an introduction :) We should be able to get help from anywhere.
##### ISSUE TYPE
Feature Idea
##### COMPONENT NAME
ansible-test
##### ANSIBLE VERSION
```
ansible 2.9.0
config file = None
configured module search path = ['/Users/remi.rey/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/remi.rey/.virtualenvs/ansible/lib/python3.7/site-packages/ansible
executable location = /Users/remi.rey/.virtualenvs/ansible/bin/ansible
python version = 3.7.4 (default, Sep 7 2019, 18:27:02) [Clang 10.0.1 (clang-1001.0.46.4)]
```
##### CONFIGURATION
```paste below
N/A
```
##### OS / ENVIRONMENT
<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->
N/A
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
<!--- Paste example playbooks or commands between quotes below -->
```
$ pip install ansible==2.9
$ ansible-test --help
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- Describe what you expected to happen when running the steps above -->
```
$ ansible-test --help
usage: ansible-test [-h] COMMAND ...
positional arguments:
COMMAND
integration posix integration tests
network-integration
network integration tests
windows-integration
windows integration tests
units unit tests
sanity sanity tests
shell open an interactive shell
coverage code coverage management and reporting
env show information about the test environment
optional arguments:
-h, --help show this help message and exit
Install the "argcomplete" python package to enable tab completion.
```
##### ACTUAL RESULTS
<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
<!--- Paste verbatim command output between quotes -->
```
$ ansible-test --help
ERROR: The current working directory must be at or below:
- an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/
Current working directory: /Users/remi.rey
```
|
https://github.com/ansible/ansible/issues/64523
|
https://github.com/ansible/ansible/pull/76866
|
07bcd13e6fdc545e4fcbfd9fd5c0052a063a0df6
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
| 2019-11-06T16:33:23Z |
python
| 2022-01-27T20:32:11Z |
test/lib/ansible_test/_internal/cli/epilog.py
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 64,523 |
ansible-test --help reports an error if cwd is not a collection path
|
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and devel branch are affected too -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
The `ansible-test --help` command will fail with an error if you are not inside a collection path.
It's a bit hard as an introduction :) We should be able to get help from anywhere.
##### ISSUE TYPE
Feature Idea
##### COMPONENT NAME
ansible-test
##### ANSIBLE VERSION
```
ansible 2.9.0
config file = None
configured module search path = ['/Users/remi.rey/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/remi.rey/.virtualenvs/ansible/lib/python3.7/site-packages/ansible
executable location = /Users/remi.rey/.virtualenvs/ansible/bin/ansible
python version = 3.7.4 (default, Sep 7 2019, 18:27:02) [Clang 10.0.1 (clang-1001.0.46.4)]
```
##### CONFIGURATION
```paste below
N/A
```
##### OS / ENVIRONMENT
<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->
N/A
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
<!--- Paste example playbooks or commands between quotes below -->
```
$ pip install ansible==2.9
$ ansible-test --help
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- Describe what you expected to happen when running the steps above -->
```
$ ansible-test --help
usage: ansible-test [-h] COMMAND ...
positional arguments:
COMMAND
integration posix integration tests
network-integration
network integration tests
windows-integration
windows integration tests
units unit tests
sanity sanity tests
shell open an interactive shell
coverage code coverage management and reporting
env show information about the test environment
optional arguments:
-h, --help show this help message and exit
Install the "argcomplete" python package to enable tab completion.
```
##### ACTUAL RESULTS
<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
<!--- Paste verbatim command output between quotes -->
```
$ ansible-test --help
ERROR: The current working directory must be at or below:
- an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/
Current working directory: /Users/remi.rey
```
|
https://github.com/ansible/ansible/issues/64523
|
https://github.com/ansible/ansible/pull/76866
|
07bcd13e6fdc545e4fcbfd9fd5c0052a063a0df6
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
| 2019-11-06T16:33:23Z |
python
| 2022-01-27T20:32:11Z |
test/lib/ansible_test/_internal/data.py
|
"""Context information for the current invocation of ansible-test."""
from __future__ import annotations
import dataclasses
import os
import typing as t
from .util import (
ApplicationError,
import_plugins,
is_subdir,
ANSIBLE_LIB_ROOT,
ANSIBLE_TEST_ROOT,
ANSIBLE_SOURCE_ROOT,
display,
cache,
)
from .provider import (
find_path_provider,
get_path_provider_classes,
ProviderNotFoundForPath,
)
from .provider.source import (
SourceProvider,
)
from .provider.source.unversioned import (
UnversionedSource,
)
from .provider.source.installed import (
InstalledSource,
)
from .provider.layout import (
ContentLayout,
LayoutProvider,
)
class DataContext:
"""Data context providing details about the current execution environment for ansible-test."""
def __init__(self):
content_path = os.environ.get('ANSIBLE_TEST_CONTENT_ROOT')
current_path = os.getcwd()
layout_providers = get_path_provider_classes(LayoutProvider)
source_providers = get_path_provider_classes(SourceProvider)
self.__layout_providers = layout_providers
self.__source_providers = source_providers
self.__ansible_source = None # type: t.Optional[t.Tuple[t.Tuple[str, str], ...]]
self.payload_callbacks = [] # type: t.List[t.Callable[[t.List[t.Tuple[str, str]]], None]]
if content_path:
content = self.__create_content_layout(layout_providers, source_providers, content_path, False)
elif ANSIBLE_SOURCE_ROOT and is_subdir(current_path, ANSIBLE_SOURCE_ROOT):
content = self.__create_content_layout(layout_providers, source_providers, ANSIBLE_SOURCE_ROOT, False)
else:
content = self.__create_content_layout(layout_providers, source_providers, current_path, True)
self.content = content # type: ContentLayout
def create_collection_layouts(self): # type: () -> t.List[ContentLayout]
"""
Return a list of collection layouts, one for each collection in the same collection root as the current collection layout.
An empty list is returned if the current content layout is not a collection layout.
"""
layout = self.content
collection = layout.collection
if not collection:
return []
root_path = os.path.join(collection.root, 'ansible_collections')
display.info('Scanning collection root: %s' % root_path, verbosity=1)
namespace_names = sorted(name for name in os.listdir(root_path) if os.path.isdir(os.path.join(root_path, name)))
collections = []
for namespace_name in namespace_names:
namespace_path = os.path.join(root_path, namespace_name)
collection_names = sorted(name for name in os.listdir(namespace_path) if os.path.isdir(os.path.join(namespace_path, name)))
for collection_name in collection_names:
collection_path = os.path.join(namespace_path, collection_name)
if collection_path == os.path.join(collection.root, collection.directory):
collection_layout = layout
else:
collection_layout = self.__create_content_layout(self.__layout_providers, self.__source_providers, collection_path, False)
file_count = len(collection_layout.all_files())
if not file_count:
continue
display.info('Including collection: %s (%d files)' % (collection_layout.collection.full_name, file_count), verbosity=1)
collections.append(collection_layout)
return collections
@staticmethod
def __create_content_layout(layout_providers, # type: t.List[t.Type[LayoutProvider]]
source_providers, # type: t.List[t.Type[SourceProvider]]
root, # type: str
walk, # type: bool
): # type: (...) -> ContentLayout
"""Create a content layout using the given providers and root path."""
layout_provider = find_path_provider(LayoutProvider, layout_providers, root, walk)
try:
# Begin the search for the source provider at the layout provider root.
# This intentionally ignores version control within subdirectories of the layout root, a condition which was previously an error.
# Doing so allows support for older git versions for which it is difficult to distinguish between a super project and a sub project.
# It also provides a better user experience, since the solution for the user would effectively be the same -- to remove the nested version control.
source_provider = find_path_provider(SourceProvider, source_providers, layout_provider.root, walk)
except ProviderNotFoundForPath:
source_provider = UnversionedSource(layout_provider.root)
layout = layout_provider.create(layout_provider.root, source_provider.get_paths(layout_provider.root))
return layout
def __create_ansible_source(self):
"""Return a tuple of Ansible source files with both absolute and relative paths."""
if not ANSIBLE_SOURCE_ROOT:
sources = []
source_provider = InstalledSource(ANSIBLE_LIB_ROOT)
sources.extend((os.path.join(source_provider.root, path), os.path.join('lib', 'ansible', path))
for path in source_provider.get_paths(source_provider.root))
source_provider = InstalledSource(ANSIBLE_TEST_ROOT)
sources.extend((os.path.join(source_provider.root, path), os.path.join('test', 'lib', 'ansible_test', path))
for path in source_provider.get_paths(source_provider.root))
return tuple(sources)
if self.content.is_ansible:
return tuple((os.path.join(self.content.root, path), path) for path in self.content.all_files())
try:
source_provider = find_path_provider(SourceProvider, self.__source_providers, ANSIBLE_SOURCE_ROOT, False)
except ProviderNotFoundForPath:
source_provider = UnversionedSource(ANSIBLE_SOURCE_ROOT)
return tuple((os.path.join(source_provider.root, path), path) for path in source_provider.get_paths(source_provider.root))
@property
def ansible_source(self): # type: () -> t.Tuple[t.Tuple[str, str], ...]
"""Return a tuple of Ansible source files with both absolute and relative paths."""
if not self.__ansible_source:
self.__ansible_source = self.__create_ansible_source()
return self.__ansible_source
def register_payload_callback(self, callback): # type: (t.Callable[[t.List[t.Tuple[str, str]]], None]) -> None
"""Register the given payload callback."""
self.payload_callbacks.append(callback)
@cache
def data_context(): # type: () -> DataContext
"""Initialize provider plugins."""
provider_types = (
'layout',
'source',
)
for provider_type in provider_types:
import_plugins('provider/%s' % provider_type)
try:
context = DataContext()
except ProviderNotFoundForPath:
options = [
' - an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/',
]
if ANSIBLE_SOURCE_ROOT:
options.insert(0, ' - the Ansible source: %s/' % ANSIBLE_SOURCE_ROOT)
raise ApplicationError('''The current working directory must be at or below:
%s
Current working directory: %s''' % ('\n'.join(options), os.getcwd()))
return context
@dataclasses.dataclass(frozen=True)
class PluginInfo:
"""Information about an Ansible plugin."""
plugin_type: str
name: str
paths: t.List[str]
@cache
def content_plugins():
"""
Analyze content.
The primary purpose of this analysis is to facilitate mapping of integration tests to the plugin(s) they are intended to test.
"""
plugins = {} # type: t.Dict[str, t.Dict[str, PluginInfo]]
for plugin_type, plugin_directory in data_context().content.plugin_paths.items():
plugin_paths = sorted(data_context().content.walk_files(plugin_directory))
plugin_directory_offset = len(plugin_directory.split(os.path.sep))
plugin_files = {}
for plugin_path in plugin_paths:
plugin_filename = os.path.basename(plugin_path)
plugin_parts = plugin_path.split(os.path.sep)[plugin_directory_offset:-1]
if plugin_filename == '__init__.py':
if plugin_type != 'module_utils':
continue
else:
plugin_name = os.path.splitext(plugin_filename)[0]
if data_context().content.is_ansible and plugin_type == 'modules':
plugin_name = plugin_name.lstrip('_')
plugin_parts.append(plugin_name)
plugin_name = '.'.join(plugin_parts)
plugin_files.setdefault(plugin_name, []).append(plugin_filename)
plugins[plugin_type] = {plugin_name: PluginInfo(
plugin_type=plugin_type,
name=plugin_name,
paths=paths,
) for plugin_name, paths in plugin_files.items()}
return plugins
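# Editor's sketch (hedged, not part of the original source): content_plugins()
# returns a two-level mapping keyed by plugin type and dotted plugin name; note
# that PluginInfo.paths holds the plugin filenames collected above, e.g.:
#
#   plugins = content_plugins()
#   # plugins['modules']['ping'] -> PluginInfo(plugin_type='modules', name='ping', paths=['ping.py'])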
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 64,523 |
ansible-test --help reports an error if cwd is not a collection path
|
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and devel branch are affected too -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
The `ansible-test --help` command will fail with an error if you are not inside a collection path.
It's a bit hard as an introduction :) We should be able to get help from anywhere.
##### ISSUE TYPE
Feature Idea
##### COMPONENT NAME
ansible-test
##### ANSIBLE VERSION
```
ansible 2.9.0
config file = None
configured module search path = ['/Users/remi.rey/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/remi.rey/.virtualenvs/ansible/lib/python3.7/site-packages/ansible
executable location = /Users/remi.rey/.virtualenvs/ansible/bin/ansible
python version = 3.7.4 (default, Sep 7 2019, 18:27:02) [Clang 10.0.1 (clang-1001.0.46.4)]
```
##### CONFIGURATION
```paste below
N/A
```
##### OS / ENVIRONMENT
<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->
N/A
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
<!--- Paste example playbooks or commands between quotes below -->
```
$ pip install ansible==2.9
$ ansible-test --help
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- Describe what you expected to happen when running the steps above -->
```
$ ansible-test --help
usage: ansible-test [-h] COMMAND ...
positional arguments:
COMMAND
integration posix integration tests
network-integration
network integration tests
windows-integration
windows integration tests
units unit tests
sanity sanity tests
shell open an interactive shell
coverage code coverage management and reporting
env show information about the test environment
optional arguments:
-h, --help show this help message and exit
Install the "argcomplete" python package to enable tab completion.
```
##### ACTUAL RESULTS
<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
<!--- Paste verbatim command output between quotes -->
```
$ ansible-test --help
ERROR: The current working directory must be at or below:
- an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/
Current working directory: /Users/remi.rey
```
|
https://github.com/ansible/ansible/issues/64523
|
https://github.com/ansible/ansible/pull/76866
|
07bcd13e6fdc545e4fcbfd9fd5c0052a063a0df6
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
| 2019-11-06T16:33:23Z |
python
| 2022-01-27T20:32:11Z |
test/lib/ansible_test/_internal/provider/layout/__init__.py
|
"""Code for finding content."""
from __future__ import annotations
import abc
import collections
import os
import typing as t
from ...util import (
ANSIBLE_SOURCE_ROOT,
)
from .. import (
PathProvider,
)
class Layout:
"""Description of content locations and helper methods to access content."""
def __init__(self,
root, # type: str
paths, # type: t.List[str]
): # type: (...) -> None
self.root = root
self.__paths = paths # contains both file paths and symlinked directory paths (ending with os.path.sep)
self.__files = [path for path in paths if not path.endswith(os.path.sep)] # contains only file paths
self.__paths_tree = paths_to_tree(self.__paths)
self.__files_tree = paths_to_tree(self.__files)
def all_files(self, include_symlinked_directories=False): # type: (bool) -> t.List[str]
"""Return a list of all file paths."""
if include_symlinked_directories:
return self.__paths
return self.__files
def walk_files(self, directory, include_symlinked_directories=False): # type: (str, bool) -> t.List[str]
"""Return a list of file paths found recursively under the given directory."""
if include_symlinked_directories:
tree = self.__paths_tree
else:
tree = self.__files_tree
parts = directory.rstrip(os.path.sep).split(os.path.sep)
item = get_tree_item(tree, parts)
if not item:
return []
directories = collections.deque(item[0].values())
files = list(item[1])
while directories:
item = directories.pop()
directories.extend(item[0].values())
files.extend(item[1])
return files
def get_dirs(self, directory): # type: (str) -> t.List[str]
"""Return a list directory paths found directly under the given directory."""
parts = directory.rstrip(os.path.sep).split(os.path.sep)
item = get_tree_item(self.__files_tree, parts)
return [os.path.join(directory, key) for key in item[0].keys()] if item else []
def get_files(self, directory): # type: (str) -> t.List[str]
"""Return a list of file paths found directly under the given directory."""
parts = directory.rstrip(os.path.sep).split(os.path.sep)
item = get_tree_item(self.__files_tree, parts)
return item[1] if item else []
class ContentLayout(Layout):
"""Information about the current Ansible content being tested."""
def __init__(self,
root, # type: str
paths, # type: t.List[str]
plugin_paths, # type: t.Dict[str, str]
collection, # type: t.Optional[CollectionDetail]
test_path, # type: str
results_path, # type: str
sanity_path, # type: str
sanity_messages, # type: t.Optional[LayoutMessages]
integration_path, # type: str
integration_targets_path, # type: str
integration_vars_path, # type: str
integration_messages, # type: t.Optional[LayoutMessages]
unit_path, # type: str
unit_module_path, # type: str
unit_module_utils_path, # type: str
unit_messages, # type: t.Optional[LayoutMessages]
): # type: (...) -> None
super().__init__(root, paths)
self.plugin_paths = plugin_paths
self.collection = collection
self.test_path = test_path
self.results_path = results_path
self.sanity_path = sanity_path
self.sanity_messages = sanity_messages
self.integration_path = integration_path
self.integration_targets_path = integration_targets_path
self.integration_vars_path = integration_vars_path
self.integration_messages = integration_messages
self.unit_path = unit_path
self.unit_module_path = unit_module_path
self.unit_module_utils_path = unit_module_utils_path
self.unit_messages = unit_messages
self.is_ansible = root == ANSIBLE_SOURCE_ROOT
@property
def prefix(self): # type: () -> str
"""Return the collection prefix or an empty string if not a collection."""
if self.collection:
return self.collection.prefix
return ''
@property
def module_path(self): # type: () -> t.Optional[str]
"""Return the path where modules are found, if any."""
return self.plugin_paths.get('modules')
@property
def module_utils_path(self): # type: () -> t.Optional[str]
"""Return the path where module_utils are found, if any."""
return self.plugin_paths.get('module_utils')
@property
def module_utils_powershell_path(self): # type: () -> t.Optional[str]
"""Return the path where powershell module_utils are found, if any."""
if self.is_ansible:
return os.path.join(self.plugin_paths['module_utils'], 'powershell')
return self.plugin_paths.get('module_utils')
@property
def module_utils_csharp_path(self): # type: () -> t.Optional[str]
"""Return the path where csharp module_utils are found, if any."""
if self.is_ansible:
return os.path.join(self.plugin_paths['module_utils'], 'csharp')
return self.plugin_paths.get('module_utils')
class LayoutMessages:
"""Messages generated during layout creation that should be deferred for later display."""
def __init__(self):
self.info = [] # type: t.List[str]
self.warning = [] # type: t.List[str]
self.error = [] # type: t.List[str]
class CollectionDetail:
"""Details about the layout of the current collection."""
def __init__(self,
name, # type: str
namespace, # type: str
root, # type: str
): # type: (...) -> None
self.name = name
self.namespace = namespace
self.root = root
self.full_name = '%s.%s' % (namespace, name)
self.prefix = '%s.' % self.full_name
self.directory = os.path.join('ansible_collections', namespace, name)
class LayoutProvider(PathProvider):
"""Base class for layout providers."""
PLUGIN_TYPES = (
'action',
'become',
'cache',
'callback',
'cliconf',
'connection',
'doc_fragments',
'filter',
'httpapi',
'inventory',
'lookup',
'module_utils',
'modules',
'netconf',
'shell',
'strategy',
'terminal',
'test',
'vars',
# The following are plugin directories not directly supported by ansible-core, but used in collections
# (https://github.com/ansible-collections/overview/blob/main/collection_requirements.rst#modules--plugins)
'plugin_utils',
'sub_plugins',
)
@abc.abstractmethod
def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout
"""Create a layout using the given root and paths."""
def paths_to_tree(paths): # type: (t.List[str]) -> t.Tuple[t.Dict[str, t.Any], t.List[str]]
"""Return a filesystem tree from the given list of paths."""
tree = {}, []
for path in paths:
parts = path.split(os.path.sep)
root = tree
for part in parts[:-1]:
if part not in root[0]:
root[0][part] = {}, []
root = root[0][part]
root[1].append(path)
return tree
def get_tree_item(tree, parts): # type: (t.Tuple[t.Dict[str, t.Any], t.List[str]], t.List[str]) -> t.Optional[t.Tuple[t.Dict[str, t.Any], t.List[str]]]
"""Return the portion of the tree found under the path given by parts, or None if it does not exist."""
root = tree
for part in parts:
root = root[0].get(part)
if not root:
return None
return root
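# Illustrative sketch (not part of the original module): how the two helpers
# above cooperate, assuming POSIX path separators:
#   tree = paths_to_tree(['plugins/modules/ping.py'])
#   item = get_tree_item(tree, ['plugins', 'modules'])
#   assert item == ({}, ['plugins/modules/ping.py'])
#   assert get_tree_item(tree, ['missing']) is None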
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 64,523 |
ansible-test --help reports an error if cwd is not a collection path
|
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and devel branch are affected too -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
The `ansible-test --help` command will fail with an error if you are not inside a collection path.
It's a bit hard as an introduction :) We should be able to get help from anywhere.
##### ISSUE TYPE
Feature Idea
##### COMPONENT NAME
ansible-test
##### ANSIBLE VERSION
```
ansible 2.9.0
config file = None
configured module search path = ['/Users/remi.rey/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/remi.rey/.virtualenvs/ansible/lib/python3.7/site-packages/ansible
executable location = /Users/remi.rey/.virtualenvs/ansible/bin/ansible
python version = 3.7.4 (default, Sep 7 2019, 18:27:02) [Clang 10.0.1 (clang-1001.0.46.4)]
```
##### CONFIGURATION
```paste below
N/A
```
##### OS / ENVIRONMENT
<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->
N/A
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
<!--- Paste example playbooks or commands between quotes below -->
```
$ pip install ansible==2.9
$ ansible-test --help
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- Describe what you expected to happen when running the steps above -->
```
$ ansible-test --help
usage: ansible-test [-h] COMMAND ...
positional arguments:
COMMAND
integration posix integration tests
network-integration
network integration tests
windows-integration
windows integration tests
units unit tests
sanity sanity tests
shell open an interactive shell
coverage code coverage management and reporting
env show information about the test environment
optional arguments:
-h, --help show this help message and exit
Install the "argcomplete" python package to enable tab completion.
```
##### ACTUAL RESULTS
<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
<!--- Paste verbatim command output between quotes -->
```
$ ansible-test --help
ERROR: The current working directory must be at or below:
- an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/
Current working directory: /Users/remi.rey
```
|
https://github.com/ansible/ansible/issues/64523
|
https://github.com/ansible/ansible/pull/76866
|
07bcd13e6fdc545e4fcbfd9fd5c0052a063a0df6
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
| 2019-11-06T16:33:23Z |
python
| 2022-01-27T20:32:11Z |
test/lib/ansible_test/_internal/provider/layout/unsupported.py
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 64,523 |
ansible-test --help reports an error if cwd is not a collection path
|
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and devel branch are affected too -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
The `ansible-test --help` command will fail with an error if you are not inside a collection path.
It's a bit hard as an introduction :) We should be able to get help from anywhere.
##### ISSUE TYPE
Feature Idea
##### COMPONENT NAME
ansible-test
##### ANSIBLE VERSION
```
ansible 2.9.0
config file = None
configured module search path = ['/Users/remi.rey/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /Users/remi.rey/.virtualenvs/ansible/lib/python3.7/site-packages/ansible
executable location = /Users/remi.rey/.virtualenvs/ansible/bin/ansible
python version = 3.7.4 (default, Sep 7 2019, 18:27:02) [Clang 10.0.1 (clang-1001.0.46.4)]
```
##### CONFIGURATION
```paste below
N/A
```
##### OS / ENVIRONMENT
<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->
N/A
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
<!--- Paste example playbooks or commands between quotes below -->
```
$ pip install ansible==2.9
$ ansible-test --help
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- Describe what you expected to happen when running the steps above -->
```
$ ansible-test --help
usage: ansible-test [-h] COMMAND ...
positional arguments:
COMMAND
integration posix integration tests
network-integration
network integration tests
windows-integration
windows integration tests
units unit tests
sanity sanity tests
shell open an interactive shell
coverage code coverage management and reporting
env show information about the test environment
optional arguments:
-h, --help show this help message and exit
Install the "argcomplete" python package to enable tab completion.
```
##### ACTUAL RESULTS
<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
<!--- Paste verbatim command output between quotes -->
```
$ ansible-test --help
ERROR: The current working directory must be at or below:
- an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/
Current working directory: /Users/remi.rey
```
|
https://github.com/ansible/ansible/issues/64523
|
https://github.com/ansible/ansible/pull/76866
|
07bcd13e6fdc545e4fcbfd9fd5c0052a063a0df6
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
| 2019-11-06T16:33:23Z |
python
| 2022-01-27T20:32:11Z |
test/lib/ansible_test/_internal/provider/source/unsupported.py
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 62,079 |
collections sanity tests should start by verifying it's a valid collection
|
Currently an invalid collection will error out on docs sanity, but this is misleading since the real problem is that the collection name itself is invalid. A preliminary test verifying this should be introduced to avoid confusion.
example currently:
```
Running sanity test 'ansible-doc' with Python 3.7
ERROR: Command "ansible-doc -t module cyberark.ansible-security-automation-collection.cyberark_account cyberark.ansible-security-automation-collection.cyberark_authentication cyberark.ansible-security-automation-collection.cyberark_credential cyberark.ansible-security-automation-collection.cyberark_user" returned exit status 250.
>>> Standard Error
[WARNING]: module cyberark.ansible-security-automation-collection.cyberark_account not found in: /dev/null:/home/abehl/work/src/anshul_ansible/ansible/lib/ansible/modules
ERROR! Unexpected Exception, this is probably a bug: cannot unpack non-iterable NoneType object
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
ansible-test
|
https://github.com/ansible/ansible/issues/62079
|
https://github.com/ansible/ansible/pull/76869
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
|
26b43f425f6c04732818f600e1cf4bcb9a56f89a
| 2019-09-10T16:03:06Z |
python
| 2022-01-28T01:24:40Z |
changelogs/fragments/ansible-test-collection-identifier.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 62,079 |
collections sanity tests should start by verifying it's a valid collection
|
Currently an invalid collection will error out on docs sanity, but this is misleading since the real problem is that the collection name itself is invalid. A preliminary test verifying this should be introduced to avoid confusion.
example currently:
```
Running sanity test 'ansible-doc' with Python 3.7
ERROR: Command "ansible-doc -t module cyberark.ansible-security-automation-collection.cyberark_account cyberark.ansible-security-automation-collection.cyberark_authentication cyberark.ansible-security-automation-collection.cyberark_credential cyberark.ansible-security-automation-collection.cyberark_user" returned exit status 250.
>>> Standard Error
[WARNING]: module cyberark.ansible-security-automation-collection.cyberark_account not found in: /dev/null:/home/abehl/work/src/anshul_ansible/ansible/lib/ansible/modules
ERROR! Unexpected Exception, this is probably a bug: cannot unpack non-iterable NoneType object
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
ansible-test
|
https://github.com/ansible/ansible/issues/62079
|
https://github.com/ansible/ansible/pull/76869
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
|
26b43f425f6c04732818f600e1cf4bcb9a56f89a
| 2019-09-10T16:03:06Z |
python
| 2022-01-28T01:24:40Z |
test/lib/ansible_test/_internal/data.py
|
"""Context information for the current invocation of ansible-test."""
from __future__ import annotations
import dataclasses
import os
import typing as t
from .util import (
ApplicationError,
import_plugins,
is_subdir,
ANSIBLE_LIB_ROOT,
ANSIBLE_TEST_ROOT,
ANSIBLE_SOURCE_ROOT,
display,
cache,
)
from .provider import (
find_path_provider,
get_path_provider_classes,
ProviderNotFoundForPath,
)
from .provider.source import (
SourceProvider,
)
from .provider.source.unversioned import (
UnversionedSource,
)
from .provider.source.installed import (
InstalledSource,
)
from .provider.source.unsupported import (
UnsupportedSource,
)
from .provider.layout import (
ContentLayout,
LayoutProvider,
)
from .provider.layout.unsupported import (
UnsupportedLayout,
)
class DataContext:
"""Data context providing details about the current execution environment for ansible-test."""
def __init__(self):
content_path = os.environ.get('ANSIBLE_TEST_CONTENT_ROOT')
current_path = os.getcwd()
layout_providers = get_path_provider_classes(LayoutProvider)
source_providers = get_path_provider_classes(SourceProvider)
self.__layout_providers = layout_providers
self.__source_providers = source_providers
self.__ansible_source = None # type: t.Optional[t.Tuple[t.Tuple[str, str], ...]]
self.payload_callbacks = [] # type: t.List[t.Callable[[t.List[t.Tuple[str, str]]], None]]
if content_path:
content = self.__create_content_layout(layout_providers, source_providers, content_path, False)
elif ANSIBLE_SOURCE_ROOT and is_subdir(current_path, ANSIBLE_SOURCE_ROOT):
content = self.__create_content_layout(layout_providers, source_providers, ANSIBLE_SOURCE_ROOT, False)
else:
content = self.__create_content_layout(layout_providers, source_providers, current_path, True)
self.content = content # type: ContentLayout
def create_collection_layouts(self): # type: () -> t.List[ContentLayout]
"""
Return a list of collection layouts, one for each collection in the same collection root as the current collection layout.
An empty list is returned if the current content layout is not a collection layout.
"""
layout = self.content
collection = layout.collection
if not collection:
return []
root_path = os.path.join(collection.root, 'ansible_collections')
display.info('Scanning collection root: %s' % root_path, verbosity=1)
namespace_names = sorted(name for name in os.listdir(root_path) if os.path.isdir(os.path.join(root_path, name)))
collections = []
for namespace_name in namespace_names:
namespace_path = os.path.join(root_path, namespace_name)
collection_names = sorted(name for name in os.listdir(namespace_path) if os.path.isdir(os.path.join(namespace_path, name)))
for collection_name in collection_names:
collection_path = os.path.join(namespace_path, collection_name)
if collection_path == os.path.join(collection.root, collection.directory):
collection_layout = layout
else:
collection_layout = self.__create_content_layout(self.__layout_providers, self.__source_providers, collection_path, False)
file_count = len(collection_layout.all_files())
if not file_count:
continue
display.info('Including collection: %s (%d files)' % (collection_layout.collection.full_name, file_count), verbosity=1)
collections.append(collection_layout)
return collections
@staticmethod
def __create_content_layout(layout_providers, # type: t.List[t.Type[LayoutProvider]]
source_providers, # type: t.List[t.Type[SourceProvider]]
root, # type: str
walk, # type: bool
): # type: (...) -> ContentLayout
"""Create a content layout using the given providers and root path."""
try:
layout_provider = find_path_provider(LayoutProvider, layout_providers, root, walk)
except ProviderNotFoundForPath:
layout_provider = UnsupportedLayout(root)
try:
# Begin the search for the source provider at the layout provider root.
# This intentionally ignores version control within subdirectories of the layout root, a condition which was previously an error.
# Doing so allows support for older git versions for which it is difficult to distinguish between a super project and a sub project.
# It also provides a better user experience, since the solution for the user would effectively be the same -- to remove the nested version control.
if isinstance(layout_provider, UnsupportedLayout):
source_provider = UnsupportedSource(layout_provider.root)
else:
source_provider = find_path_provider(SourceProvider, source_providers, layout_provider.root, walk)
except ProviderNotFoundForPath:
source_provider = UnversionedSource(layout_provider.root)
layout = layout_provider.create(layout_provider.root, source_provider.get_paths(layout_provider.root))
return layout
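# Provider resolution summary (descriptive note, not part of the original file):
# layout: first matching provider, else UnsupportedLayout (never raises);
# source: UnsupportedSource when the layout is unsupported, otherwise the first
# matching provider, falling back to UnversionedSource when none is found.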
def __create_ansible_source(self):
"""Return a tuple of Ansible source files with both absolute and relative paths."""
if not ANSIBLE_SOURCE_ROOT:
sources = []
source_provider = InstalledSource(ANSIBLE_LIB_ROOT)
sources.extend((os.path.join(source_provider.root, path), os.path.join('lib', 'ansible', path))
for path in source_provider.get_paths(source_provider.root))
source_provider = InstalledSource(ANSIBLE_TEST_ROOT)
sources.extend((os.path.join(source_provider.root, path), os.path.join('test', 'lib', 'ansible_test', path))
for path in source_provider.get_paths(source_provider.root))
return tuple(sources)
if self.content.is_ansible:
return tuple((os.path.join(self.content.root, path), path) for path in self.content.all_files())
try:
source_provider = find_path_provider(SourceProvider, self.__source_providers, ANSIBLE_SOURCE_ROOT, False)
except ProviderNotFoundForPath:
source_provider = UnversionedSource(ANSIBLE_SOURCE_ROOT)
return tuple((os.path.join(source_provider.root, path), path) for path in source_provider.get_paths(source_provider.root))
@property
def ansible_source(self): # type: () -> t.Tuple[t.Tuple[str, str], ...]
"""Return a tuple of Ansible source files with both absolute and relative paths."""
if not self.__ansible_source:
self.__ansible_source = self.__create_ansible_source()
return self.__ansible_source
def register_payload_callback(self, callback): # type: (t.Callable[[t.List[t.Tuple[str, str]]], None]) -> None
"""Register the given payload callback."""
self.payload_callbacks.append(callback)
def check_layout(self) -> None:
"""Report an error if the layout is unsupported."""
if self.content.unsupported:
raise ApplicationError(self.explain_working_directory())
@staticmethod
def explain_working_directory() -> str:
"""Return a message explaining the working directory requirements."""
blocks = [
'The current working directory must be within the source tree being tested.',
'',
]
if ANSIBLE_SOURCE_ROOT:
blocks.append(f'Testing Ansible: {ANSIBLE_SOURCE_ROOT}/')
blocks.append('')
cwd = os.getcwd()
blocks.append('Testing an Ansible collection: {...}/ansible_collections/{namespace}/{collection}/')
blocks.append('Example #1: community.general -> ~/code/ansible_collections/community/general/')
blocks.append('Example #2: ansible.util -> ~/.ansible/collections/ansible_collections/ansible/util/')
blocks.append('')
blocks.append(f'Current working directory: {cwd}/')
if os.path.basename(os.path.dirname(cwd)) == 'ansible_collections':
blocks.append(f'Expected parent directory: {os.path.dirname(cwd)}/{{namespace}}/{{collection}}/')
elif os.path.basename(cwd) == 'ansible_collections':
blocks.append(f'Expected parent directory: {cwd}/{{namespace}}/{{collection}}/')
else:
blocks.append('No "ansible_collections" parent directory was found.')
message = '\n'.join(blocks)
return message
@cache
def data_context(): # type: () -> DataContext
"""Initialize provider plugins."""
provider_types = (
'layout',
'source',
)
for provider_type in provider_types:
import_plugins('provider/%s' % provider_type)
context = DataContext()
return context
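# Usage sketch (illustrative, not part of the original file): data_context() is
# cached, so repeated calls share a single DataContext instance.
#   layout = data_context().content
#   layout.is_ansible  # True only when testing the Ansible source tree itself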
@dataclasses.dataclass(frozen=True)
class PluginInfo:
"""Information about an Ansible plugin."""
plugin_type: str
name: str
paths: t.List[str]
@cache
def content_plugins():
"""
Analyze content.
The primary purpose of this analysis is to facilitate mapping of integration tests to the plugin(s) they are intended to test.
"""
plugins = {} # type: t.Dict[str, t.Dict[str, PluginInfo]]
for plugin_type, plugin_directory in data_context().content.plugin_paths.items():
plugin_paths = sorted(data_context().content.walk_files(plugin_directory))
plugin_directory_offset = len(plugin_directory.split(os.path.sep))
plugin_files = {}
for plugin_path in plugin_paths:
plugin_filename = os.path.basename(plugin_path)
plugin_parts = plugin_path.split(os.path.sep)[plugin_directory_offset:-1]
if plugin_filename == '__init__.py':
if plugin_type != 'module_utils':
continue
else:
plugin_name = os.path.splitext(plugin_filename)[0]
if data_context().content.is_ansible and plugin_type == 'modules':
plugin_name = plugin_name.lstrip('_')
plugin_parts.append(plugin_name)
plugin_name = '.'.join(plugin_parts)
plugin_files.setdefault(plugin_name, []).append(plugin_filename)
plugins[plugin_type] = {plugin_name: PluginInfo(
plugin_type=plugin_type,
name=plugin_name,
paths=paths,
) for plugin_name, paths in plugin_files.items()}
return plugins
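# Illustrative sketch (not part of the original file): how a plugin path maps to
# a dotted plugin name above. With plugin_directory 'plugins/modules' (offset 2)
# and plugin_path 'plugins/modules/cloud/misc/foo.py', the computed parts are
# ['cloud', 'misc'] plus the stem 'foo', producing the name 'cloud.misc.foo'.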
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 62,079 |
collections sanity tests should start by verifying it's a valid collection
|
Currently an invalid collection will error out on docs sanity, but this is misleading since the real problem is that the collection name itself is invalid. A preliminary test verifying this should be introduced to avoid confusion.
example currently:
```
Running sanity test 'ansible-doc' with Python 3.7
ERROR: Command "ansible-doc -t module cyberark.ansible-security-automation-collection.cyberark_account cyberark.ansible-security-automation-collection.cyberark_authentication cyberark.ansible-security-automation-collection.cyberark_credential cyberark.ansible-security-automation-collection.cyberark_user" returned exit status 250.
>>> Standard Error
[WARNING]: module cyberark.ansible-security-automation-collection.cyberark_account not found in: /dev/null:/home/abehl/work/src/anshul_ansible/ansible/lib/ansible/modules
ERROR! Unexpected Exception, this is probably a bug: cannot unpack non-iterable NoneType object
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
ansible-test
|
https://github.com/ansible/ansible/issues/62079
|
https://github.com/ansible/ansible/pull/76869
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
|
26b43f425f6c04732818f600e1cf4bcb9a56f89a
| 2019-09-10T16:03:06Z |
python
| 2022-01-28T01:24:40Z |
test/lib/ansible_test/_internal/provider/layout/collection.py
|
"""Layout provider for Ansible collections."""
from __future__ import annotations
import os
import typing as t
from . import (
ContentLayout,
LayoutProvider,
CollectionDetail,
LayoutMessages,
)
class CollectionLayout(LayoutProvider):
"""Layout provider for Ansible collections."""
@staticmethod
def is_content_root(path): # type: (str) -> bool
"""Return True if the given path is a content root for this provider."""
if os.path.basename(os.path.dirname(os.path.dirname(path))) == 'ansible_collections':
return True
return False
def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout
"""Create a Layout using the given root and paths."""
plugin_paths = dict((p, os.path.join('plugins', p)) for p in self.PLUGIN_TYPES)
collection_root = os.path.dirname(os.path.dirname(root))
collection_dir = os.path.relpath(root, collection_root)
collection_namespace, collection_name = collection_dir.split(os.path.sep)
collection_root = os.path.dirname(collection_root)
sanity_messages = LayoutMessages()
integration_messages = LayoutMessages()
unit_messages = LayoutMessages()
# these apply to all test commands
self.__check_test_path(paths, sanity_messages)
self.__check_test_path(paths, integration_messages)
self.__check_test_path(paths, unit_messages)
# these apply to specific test commands
integration_targets_path = self.__check_integration_path(paths, integration_messages)
self.__check_unit_path(paths, unit_messages)
return ContentLayout(root,
paths,
plugin_paths=plugin_paths,
collection=CollectionDetail(
name=collection_name,
namespace=collection_namespace,
root=collection_root,
),
test_path='tests',
results_path='tests/output',
sanity_path='tests/sanity',
sanity_messages=sanity_messages,
integration_path='tests/integration',
integration_targets_path=integration_targets_path.rstrip(os.path.sep),
integration_vars_path='tests/integration/integration_config.yml',
integration_messages=integration_messages,
unit_path='tests/unit',
unit_module_path='tests/unit/plugins/modules',
unit_module_utils_path='tests/unit/plugins/module_utils',
unit_messages=unit_messages,
)
@staticmethod
def __check_test_path(paths, messages): # type: (t.List[str], LayoutMessages) -> None
modern_test_path = 'tests/'
modern_test_path_found = any(path.startswith(modern_test_path) for path in paths)
legacy_test_path = 'test/'
legacy_test_path_found = any(path.startswith(legacy_test_path) for path in paths)
if modern_test_path_found and legacy_test_path_found:
messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_test_path, modern_test_path))
elif legacy_test_path_found:
messages.warning.append('Ignoring tests in "%s" that should be in "%s".' % (legacy_test_path, modern_test_path))
@staticmethod
def __check_integration_path(paths, messages): # type: (t.List[str], LayoutMessages) -> str
modern_integration_path = 'roles/test/'
modern_integration_path_found = any(path.startswith(modern_integration_path) for path in paths)
legacy_integration_path = 'tests/integration/targets/'
legacy_integration_path_found = any(path.startswith(legacy_integration_path) for path in paths)
if modern_integration_path_found and legacy_integration_path_found:
messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_integration_path, modern_integration_path))
integration_targets_path = modern_integration_path
elif legacy_integration_path_found:
messages.info.append('Falling back to tests in "%s" because "%s" was not found.' % (legacy_integration_path, modern_integration_path))
integration_targets_path = legacy_integration_path
elif modern_integration_path_found:
messages.info.append('Loading tests from "%s".' % modern_integration_path)
integration_targets_path = modern_integration_path
else:
messages.error.append('Cannot run integration tests without "%s" or "%s".' % (modern_integration_path, legacy_integration_path))
integration_targets_path = modern_integration_path
return integration_targets_path
@staticmethod
def __check_unit_path(paths, messages): # type: (t.List[str], LayoutMessages) -> None
modern_unit_path = 'tests/unit/'
modern_unit_path_found = any(path.startswith(modern_unit_path) for path in paths)
legacy_unit_path = 'tests/units/' # test/units/ will be covered by the warnings for test/ vs tests/
legacy_unit_path_found = any(path.startswith(legacy_unit_path) for path in paths)
if modern_unit_path_found and legacy_unit_path_found:
messages.warning.append('Ignoring tests in "%s" in favor of "%s".' % (legacy_unit_path, modern_unit_path))
elif legacy_unit_path_found:
messages.warning.append('Rename "%s" to "%s" to run unit tests.' % (legacy_unit_path, modern_unit_path))
elif modern_unit_path_found:
pass # unit tests only run from one directory so no message is needed
else:
messages.error.append('Cannot run unit tests without "%s".' % modern_unit_path)
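# Illustrative sketch (not part of the original file): is_content_root only
# inspects the grandparent directory name, so for example:
#   is_content_root('/work/ansible_collections/ns/col') -> True
#   is_content_root('/work/ns/col')                     -> False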
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 62,079 |
collections sanity tests should start by verifying its a valid collection
|
currently an invalid collection will error out on docs sanity, but this is misleading since the problem is that the collection name itself is invalid, a previous test verifying this should be introduced to avoid confusion.
example currently:
```
Running sanity test 'ansible-doc' with Python 3.7
ERROR: Command "ansible-doc -t module cyberark.ansible-security-automation-collection.cyberark_account cyberark.ansible-security-automation-collection.cyberark_authentication cyberark.ansible-security-automation-collection.cyberark_credential cyberark.ansible-security-automation-collection.cyberark_user" returned exit status 250.
>>> Standard Error
[WARNING]: module cyberark.ansible-security-automation-collection.cyberark_account not found in: /dev/null:/home/abehl/work/src/anshul_ansible/ansible/lib/ansible/modules
ERROR! Unexpected Exception, this is probably a bug: cannot unpack non-iterable NoneType object
```
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
ansible-test
|
https://github.com/ansible/ansible/issues/62079
|
https://github.com/ansible/ansible/pull/76869
|
de5f60e374524de13fe079b52282cd7a9eeabd5f
|
26b43f425f6c04732818f600e1cf4bcb9a56f89a
| 2019-09-10T16:03:06Z |
python
| 2022-01-28T01:24:40Z |
test/lib/ansible_test/_internal/util.py
|
"""Miscellaneous utility functions and classes."""
from __future__ import annotations
import errno
# noinspection PyCompatibility
import fcntl
import importlib.util
import inspect
import os
import pkgutil
import random
import re
import shutil
import socket
import stat
import string
import subprocess
import sys
import time
import functools
import shlex
import typing as t
from struct import unpack, pack
# noinspection PyCompatibility
from termios import TIOCGWINSZ
from .encoding import (
to_bytes,
to_optional_bytes,
to_optional_text,
)
from .io import (
open_binary_file,
read_text_file,
)
from .thread import (
mutex,
)
from .constants import (
SUPPORTED_PYTHON_VERSIONS,
)
C = t.TypeVar('C')
TType = t.TypeVar('TType')
TKey = t.TypeVar('TKey')
TValue = t.TypeVar('TValue')
PYTHON_PATHS = {} # type: t.Dict[str, str]
try:
# noinspection PyUnresolvedReferences
MAXFD = subprocess.MAXFD
except AttributeError:
MAXFD = -1
COVERAGE_CONFIG_NAME = 'coveragerc'
ANSIBLE_TEST_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# assume running from install
ANSIBLE_ROOT = os.path.dirname(ANSIBLE_TEST_ROOT)
ANSIBLE_BIN_PATH = os.path.dirname(os.path.abspath(sys.argv[0]))
ANSIBLE_LIB_ROOT = os.path.join(ANSIBLE_ROOT, 'ansible')
ANSIBLE_SOURCE_ROOT = None
if not os.path.exists(ANSIBLE_LIB_ROOT):
# running from source
ANSIBLE_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(ANSIBLE_TEST_ROOT)))
ANSIBLE_BIN_PATH = os.path.join(ANSIBLE_ROOT, 'bin')
ANSIBLE_LIB_ROOT = os.path.join(ANSIBLE_ROOT, 'lib', 'ansible')
ANSIBLE_SOURCE_ROOT = ANSIBLE_ROOT
ANSIBLE_TEST_DATA_ROOT = os.path.join(ANSIBLE_TEST_ROOT, '_data')
ANSIBLE_TEST_UTIL_ROOT = os.path.join(ANSIBLE_TEST_ROOT, '_util')
ANSIBLE_TEST_CONFIG_ROOT = os.path.join(ANSIBLE_TEST_ROOT, 'config')
ANSIBLE_TEST_CONTROLLER_ROOT = os.path.join(ANSIBLE_TEST_UTIL_ROOT, 'controller')
ANSIBLE_TEST_TARGET_ROOT = os.path.join(ANSIBLE_TEST_UTIL_ROOT, 'target')
ANSIBLE_TEST_TOOLS_ROOT = os.path.join(ANSIBLE_TEST_CONTROLLER_ROOT, 'tools')
# Modes are set to allow all users the same level of access.
# This permits files to be used in tests that change users.
# The only exception is write access to directories for the user creating them.
# This avoids having to modify the directory permissions a second time.
MODE_READ = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
MODE_FILE = MODE_READ
MODE_FILE_EXECUTE = MODE_FILE | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
MODE_FILE_WRITE = MODE_FILE | stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
MODE_DIRECTORY = MODE_READ | stat.S_IWUSR | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
MODE_DIRECTORY_WRITE = MODE_DIRECTORY | stat.S_IWGRP | stat.S_IWOTH
def cache(func): # type: (t.Callable[[], TValue]) -> t.Callable[[], TValue]
"""Enforce exclusive access on a decorated function and cache the result."""
storage = {} # type: t.Dict[None, TValue]
sentinel = object()
@functools.wraps(func)
def cache_func():
"""Cache the return value from func."""
if (value := storage.get(None, sentinel)) is sentinel:
value = storage[None] = func()
return value
wrapper = mutex(cache_func)
return wrapper
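# Usage sketch (illustrative, not part of the original file): cache decorates
# zero-argument functions; the wrapped call runs once under an exclusive lock.
#   @cache
#   def get_value():  # type: () -> int
#       return expensive_lookup()  # hypothetical helper, executed only once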
def filter_args(args, filters): # type: (t.List[str], t.Dict[str, int]) -> t.List[str]
"""Return a filtered version of the given command line arguments."""
remaining = 0
result = []
for arg in args:
if not arg.startswith('-') and remaining:
remaining -= 1
continue
remaining = 0
parts = arg.split('=', 1)
key = parts[0]
if key in filters:
remaining = filters[key] - len(parts) + 1
continue
result.append(arg)
return result
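# Example (illustrative, not part of the original file): with
# filters={'--output': 1}, both argument styles are removed:
#   filter_args(['encrypt', '--output', 'out.yml', '-v'], {'--output': 1})
#   -> ['encrypt', '-v']
#   filter_args(['encrypt', '--output=out.yml', '-v'], {'--output': 1})
#   -> ['encrypt', '-v']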
def read_lines_without_comments(path, remove_blank_lines=False, optional=False): # type: (str, bool, bool) -> t.List[str]
"""
Returns lines from the specified text file with comments removed.
Comments are any content from a hash symbol to the end of a line.
Any spaces immediately before a comment are also removed.
"""
if optional and not os.path.exists(path):
return []
lines = read_text_file(path).splitlines()
lines = [re.sub(r' *#.*$', '', line) for line in lines]
if remove_blank_lines:
lines = [line for line in lines if line]
return lines
def exclude_none_values(data): # type: (t.Dict[TKey, t.Optional[TValue]]) -> t.Dict[TKey, TValue]
"""Return the provided dictionary with any None values excluded."""
return dict((key, value) for key, value in data.items() if value is not None)
def find_executable(executable, cwd=None, path=None, required=True): # type: (str, t.Optional[str], t.Optional[str], t.Union[bool, str]) -> t.Optional[str]
"""
Find the specified executable and return the full path, or None if it could not be found.
If required is True an exception will be raised if the executable is not found.
If required is set to 'warning' then a warning will be shown if the executable is not found.
"""
match = None
real_cwd = os.getcwd()
if not cwd:
cwd = real_cwd
if os.path.dirname(executable):
target = os.path.join(cwd, executable)
if os.path.exists(target) and os.access(target, os.F_OK | os.X_OK):
match = executable
else:
if path is None:
path = os.environ.get('PATH', os.path.defpath)
if path:
path_dirs = path.split(os.path.pathsep)
seen_dirs = set()
for path_dir in path_dirs:
if path_dir in seen_dirs:
continue
seen_dirs.add(path_dir)
if os.path.abspath(path_dir) == real_cwd:
path_dir = cwd
candidate = os.path.join(path_dir, executable)
if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK):
match = candidate
break
if not match and required:
message = 'Required program "%s" not found.' % executable
if required != 'warning':
raise ApplicationError(message)
display.warning(message)
return match
def find_python(version, path=None, required=True): # type: (str, t.Optional[str], bool) -> t.Optional[str]
"""
Find and return the full path to the specified Python version.
If required, an exception will be raised if not found.
If not required, None will be returned if not found.
"""
version_info = str_to_version(version)
if not path and version_info == sys.version_info[:len(version_info)]:
python_bin = sys.executable
else:
python_bin = find_executable('python%s' % version, path=path, required=required)
return python_bin
@cache
def get_ansible_version(): # type: () -> str
"""Return the Ansible version."""
# ansible may not be in our sys.path
# avoids a symlink to release.py since ansible placement relative to ansible-test may change during delegation
load_module(os.path.join(ANSIBLE_LIB_ROOT, 'release.py'), 'ansible_release')
# noinspection PyUnresolvedReferences
from ansible_release import __version__ as ansible_version # pylint: disable=import-error
return ansible_version
@cache
def get_available_python_versions(): # type: () -> t.Dict[str, str]
"""Return a dictionary indicating which supported Python versions are available."""
return dict((version, path) for version, path in ((version, find_python(version, required=False)) for version in SUPPORTED_PYTHON_VERSIONS) if path)
def raw_command(
cmd, # type: t.Iterable[str]
capture=False, # type: bool
env=None, # type: t.Optional[t.Dict[str, str]]
data=None, # type: t.Optional[str]
cwd=None, # type: t.Optional[str]
explain=False, # type: bool
stdin=None, # type: t.Optional[t.BinaryIO]
stdout=None, # type: t.Optional[t.BinaryIO]
cmd_verbosity=1, # type: int
str_errors='strict', # type: str
error_callback=None, # type: t.Optional[t.Callable[[SubprocessError], None]]
): # type: (...) -> t.Tuple[t.Optional[str], t.Optional[str]]
"""Run the specified command and return stdout and stderr as a tuple."""
if not cwd:
cwd = os.getcwd()
if not env:
env = common_environment()
cmd = list(cmd)
escaped_cmd = ' '.join(shlex.quote(c) for c in cmd)
display.info('Run command: %s' % escaped_cmd, verbosity=cmd_verbosity, truncate=True)
display.info('Working directory: %s' % cwd, verbosity=2)
program = find_executable(cmd[0], cwd=cwd, path=env['PATH'], required='warning')
if program:
display.info('Program found: %s' % program, verbosity=2)
for key in sorted(env.keys()):
display.info('%s=%s' % (key, env[key]), verbosity=2)
if explain:
return None, None
communicate = False
if stdin is not None:
data = None
communicate = True
elif data is not None:
stdin = subprocess.PIPE
communicate = True
if stdout:
communicate = True
if capture:
stdout = stdout or subprocess.PIPE
stderr = subprocess.PIPE
communicate = True
else:
stderr = None
start = time.time()
process = None
try:
try:
cmd_bytes = [to_bytes(c) for c in cmd]
env_bytes = dict((to_bytes(k), to_bytes(v)) for k, v in env.items())
process = subprocess.Popen(cmd_bytes, env=env_bytes, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd) # pylint: disable=consider-using-with
except OSError as ex:
if ex.errno == errno.ENOENT:
raise ApplicationError('Required program "%s" not found.' % cmd[0])
raise
if communicate:
data_bytes = to_optional_bytes(data)
stdout_bytes, stderr_bytes = process.communicate(data_bytes)
stdout_text = to_optional_text(stdout_bytes, str_errors) or u''
stderr_text = to_optional_text(stderr_bytes, str_errors) or u''
else:
process.wait()
stdout_text, stderr_text = None, None
finally:
if process and process.returncode is None:
process.kill()
display.info('') # the process we're interrupting may have completed a partial line of output
display.notice('Killed command to avoid an orphaned child process during handling of an unexpected exception.')
status = process.returncode
runtime = time.time() - start
display.info('Command exited with status %s after %s seconds.' % (status, runtime), verbosity=4)
if status == 0:
return stdout_text, stderr_text
raise SubprocessError(cmd, status, stdout_text, stderr_text, runtime, error_callback)
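# Usage sketch (illustrative, not part of the original file): run a command and
# capture its output, with a SubprocessError raised on a non-zero exit status.
#   stdout, stderr = raw_command(['git', 'rev-parse', 'HEAD'], capture=True)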
def common_environment():
"""Common environment used for executing all programs."""
env = dict(
LC_ALL='en_US.UTF-8',
PATH=os.environ.get('PATH', os.path.defpath),
)
required = (
'HOME',
)
optional = (
'LD_LIBRARY_PATH',
'SSH_AUTH_SOCK',
# MacOS High Sierra Compatibility
# http://sealiesoftware.com/blog/archive/2017/6/5/Objective-C_and_fork_in_macOS_1013.html
# Example configuration for macOS:
# export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
'OBJC_DISABLE_INITIALIZE_FORK_SAFETY',
'ANSIBLE_KEEP_REMOTE_FILES',
# MacOS Homebrew Compatibility
# https://cryptography.io/en/latest/installation/#building-cryptography-on-macos
# This may also be required to install pyyaml with libyaml support when installed in non-standard locations.
# Example configuration for brew on macOS:
# export LDFLAGS="-L$(brew --prefix openssl)/lib/ -L$(brew --prefix libyaml)/lib/"
# export CFLAGS="-I$(brew --prefix openssl)/include/ -I$(brew --prefix libyaml)/include/"
'LDFLAGS',
'CFLAGS',
)
# FreeBSD Compatibility
# This is required to include libyaml support in PyYAML.
# The header /usr/local/include/yaml.h isn't in the default include path for the compiler.
# It is included here so that tests can take advantage of it, rather than only ansible-test during managed pip installs.
# If CFLAGS has been set in the environment that value will take precedence due to being an optional var when calling pass_vars.
if os.path.exists('/etc/freebsd-update.conf'):
env.update(CFLAGS='-I/usr/local/include/')
env.update(pass_vars(required=required, optional=optional))
return env
def pass_vars(required, optional): # type: (t.Collection[str], t.Collection[str]) -> t.Dict[str, str]
"""Return a filtered dictionary of environment variables based on the current environment."""
env = {}
for name in required:
if name not in os.environ:
raise MissingEnvironmentVariable(name)
env[name] = os.environ[name]
for name in optional:
if name not in os.environ:
continue
env[name] = os.environ[name]
return env
def remove_tree(path): # type: (str) -> None
"""Remove the specified directory, siliently continuing if the directory does not exist."""
try:
shutil.rmtree(to_bytes(path))
except OSError as ex:
if ex.errno != errno.ENOENT:
raise
def is_binary_file(path): # type: (str) -> bool
"""Return True if the specified file is a binary file, otherwise return False."""
assume_text = {
'.cfg',
'.conf',
'.crt',
'.cs',
'.css',
'.html',
'.ini',
'.j2',
'.js',
'.json',
'.md',
'.pem',
'.ps1',
'.psm1',
'.py',
'.rst',
'.sh',
'.txt',
'.xml',
'.yaml',
'.yml',
}
assume_binary = {
'.bin',
'.eot',
'.gz',
'.ico',
'.iso',
'.jpg',
'.otf',
'.p12',
'.png',
'.pyc',
'.rpm',
'.ttf',
'.woff',
'.woff2',
'.zip',
}
ext = os.path.splitext(path)[1]
if ext in assume_text:
return False
if ext in assume_binary:
return True
with open_binary_file(path) as path_fd:
# noinspection PyTypeChecker
return b'\0' in path_fd.read(4096)
def generate_name(length=8): # type: (int) -> str
"""Generate and return a random name."""
return ''.join(random.choice(string.ascii_letters + string.digits) for _idx in range(length))
def generate_password(): # type: () -> str
"""Generate and return random password."""
chars = [
string.ascii_letters,
string.digits,
string.ascii_letters,
string.digits,
'-',
] * 4
password = ''.join([random.choice(char) for char in chars[:-1]])
display.sensitive.add(password)
return password
class Display:
"""Manages color console output."""
clear = '\033[0m'
red = '\033[31m'
green = '\033[32m'
yellow = '\033[33m'
blue = '\033[34m'
purple = '\033[35m'
cyan = '\033[36m'
verbosity_colors = {
0: None,
1: green,
2: blue,
3: cyan,
}
def __init__(self):
self.verbosity = 0
self.color = sys.stdout.isatty()
self.warnings = []
self.warnings_unique = set()
self.info_stderr = False
self.rows = 0
self.columns = 0
self.truncate = 0
self.redact = True
self.sensitive = set()
if os.isatty(0):
self.rows, self.columns = unpack('HHHH', fcntl.ioctl(0, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[:2]
def __warning(self, message): # type: (str) -> None
"""Internal implementation for displaying a warning message."""
self.print_message('WARNING: %s' % message, color=self.purple, fd=sys.stderr)
def review_warnings(self): # type: () -> None
"""Review all warnings which previously occurred."""
if not self.warnings:
return
self.__warning('Reviewing previous %d warning(s):' % len(self.warnings))
for warning in self.warnings:
self.__warning(warning)
def warning(self, message, unique=False, verbosity=0): # type: (str, bool, int) -> None
"""Display a warning level message."""
if verbosity > self.verbosity:
return
if unique:
if message in self.warnings_unique:
return
self.warnings_unique.add(message)
self.__warning(message)
self.warnings.append(message)
def notice(self, message): # type: (str) -> None
"""Display a notice level message."""
self.print_message('NOTICE: %s' % message, color=self.purple, fd=sys.stderr)
def error(self, message): # type: (str) -> None
"""Display an error level message."""
self.print_message('ERROR: %s' % message, color=self.red, fd=sys.stderr)
def info(self, message, verbosity=0, truncate=False): # type: (str, int, bool) -> None
"""Display an info level message."""
if self.verbosity >= verbosity:
color = self.verbosity_colors.get(verbosity, self.yellow)
self.print_message(message, color=color, fd=sys.stderr if self.info_stderr else sys.stdout, truncate=truncate)
def print_message( # pylint: disable=locally-disabled, invalid-name
self,
message, # type: str
color=None, # type: t.Optional[str]
fd=sys.stdout, # type: t.TextIO
truncate=False, # type: bool
): # type: (...) -> None
"""Display a message."""
if self.redact and self.sensitive:
for item in self.sensitive:
if not item:
continue
message = message.replace(item, '*' * len(item))
if truncate:
if len(message) > self.truncate > 5:
message = message[:self.truncate - 5] + ' ...'
if color and self.color:
# convert color resets in message to desired color
message = message.replace(self.clear, color)
message = '%s%s%s' % (color, message, self.clear)
print(message, file=fd)
fd.flush()
class ApplicationError(Exception):
"""General application error."""
class ApplicationWarning(Exception):
"""General application warning which interrupts normal program flow."""
class SubprocessError(ApplicationError):
"""Error resulting from failed subprocess execution."""
def __init__(
self,
cmd, # type: t.List[str]
status=0, # type: int
stdout=None, # type: t.Optional[str]
stderr=None, # type: t.Optional[str]
runtime=None, # type: t.Optional[float]
error_callback=None, # type: t.Optional[t.Callable[[SubprocessError], None]]
): # type: (...) -> None
message = 'Command "%s" returned exit status %s.\n' % (' '.join(shlex.quote(c) for c in cmd), status)
if stderr:
message += '>>> Standard Error\n'
message += '%s%s\n' % (stderr.strip(), Display.clear)
if stdout:
message += '>>> Standard Output\n'
message += '%s%s\n' % (stdout.strip(), Display.clear)
self.cmd = cmd
self.message = message
self.status = status
self.stdout = stdout
self.stderr = stderr
self.runtime = runtime
if error_callback:
error_callback(self)
self.message = self.message.strip()
super().__init__(self.message)
class MissingEnvironmentVariable(ApplicationError):
"""Error caused by missing environment variable."""
def __init__(self, name): # type: (str) -> None
super().__init__('Missing environment variable: %s' % name)
self.name = name
def retry(func, ex_type=SubprocessError, sleep=10, attempts=10):
"""Retry the specified function on failure."""
for dummy in range(1, attempts):
try:
return func()
except ex_type:
time.sleep(sleep)
return func()
def parse_to_list_of_dict(pattern, value): # type: (str, str) -> t.List[t.Dict[str, str]]
"""Parse lines from the given value using the specified pattern and return the extracted list of key/value pair dictionaries."""
matched = []
unmatched = []
for line in value.splitlines():
match = re.search(pattern, line)
if match:
matched.append(match.groupdict())
else:
unmatched.append(line)
if unmatched:
raise Exception('Pattern "%s" did not match values:\n%s' % (pattern, '\n'.join(unmatched)))
return matched
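# Example (illustrative, not part of the original file):
#   parse_to_list_of_dict(r'(?P<key>\w+)=(?P<value>\w+)', 'a=1\nb=2')
#   -> [{'key': 'a', 'value': '1'}, {'key': 'b', 'value': '2'}]
# Any line the pattern does not match raises an exception listing those lines.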
def get_subclasses(class_type): # type: (t.Type[C]) -> t.List[t.Type[C]]
"""Returns a list of types that are concrete subclasses of the given type."""
subclasses = set() # type: t.Set[t.Type[C]]
queue = [class_type] # type: t.List[t.Type[C]]
while queue:
parent = queue.pop()
for child in parent.__subclasses__():
if child not in subclasses:
if not inspect.isabstract(child):
subclasses.add(child)
queue.append(child)
return sorted(subclasses, key=lambda sc: sc.__name__)
def is_subdir(candidate_path, path): # type: (str, str) -> bool
"""Returns true if candidate_path is path or a subdirectory of path."""
if not path.endswith(os.path.sep):
path += os.path.sep
if not candidate_path.endswith(os.path.sep):
candidate_path += os.path.sep
return candidate_path.startswith(path)
def paths_to_dirs(paths): # type: (t.List[str]) -> t.List[str]
"""Returns a list of directories extracted from the given list of paths."""
dir_names = set()
for path in paths:
while True:
path = os.path.dirname(path)
if not path or path == os.path.sep:
break
dir_names.add(path + os.path.sep)
return sorted(dir_names)
def str_to_version(version): # type: (str) -> t.Tuple[int, ...]
"""Return a version tuple from a version string."""
return tuple(int(n) for n in version.split('.'))
def version_to_str(version): # type: (t.Tuple[int, ...]) -> str
"""Return a version string from a version tuple."""
return '.'.join(str(n) for n in version)
def sorted_versions(versions): # type: (t.List[str]) -> t.List[str]
"""Return a sorted copy of the given list of versions."""
return [version_to_str(version) for version in sorted(str_to_version(version) for version in versions)]
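# Example (illustrative, not part of the original file): versions sort
# numerically rather than lexicographically:
#   sorted_versions(['3.10', '3.9', '2.7']) -> ['2.7', '3.9', '3.10']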
def import_plugins(directory, root=None): # type: (str, t.Optional[str]) -> None
"""
Import plugins from the given directory relative to the given root.
If the root is not provided, the 'lib' directory for the test runner will be used.
"""
if root is None:
root = os.path.dirname(__file__)
path = os.path.join(root, directory)
package = __name__.rsplit('.', 1)[0]
prefix = '%s.%s.' % (package, directory.replace(os.path.sep, '.'))
for (_module_loader, name, _ispkg) in pkgutil.iter_modules([path], prefix=prefix):
module_path = os.path.join(root, name[len(package) + 1:].replace('.', os.path.sep) + '.py')
load_module(module_path, name)
def load_plugins(base_type, database): # type: (t.Type[C], t.Dict[str, t.Type[C]]) -> None
"""
Load plugins of the specified type and track them in the specified database.
Only plugins which have already been imported will be loaded.
"""
plugins = dict((sc.__module__.rsplit('.', 1)[1], sc) for sc in get_subclasses(base_type)) # type: t.Dict[str, t.Type[C]]
for plugin in plugins:
database[plugin] = plugins[plugin]
def load_module(path, name): # type: (str, str) -> None
"""Load a Python module using the given name and path."""
if name in sys.modules:
return
spec = importlib.util.spec_from_file_location(name, path)
module = importlib.util.module_from_spec(spec)
sys.modules[name] = module
# noinspection PyUnresolvedReferences
spec.loader.exec_module(module)
def sanitize_host_name(name):
"""Return a sanitized version of the given name, suitable for use as a hostname."""
return re.sub('[^A-Za-z0-9]+', '-', name)[:63].strip('-')
@cache
def get_host_ip():
"""Return the host's IP address."""
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
sock.connect(('10.255.255.255', 22))
host_ip = get_host_ip.ip = sock.getsockname()[0]
display.info('Detected host IP: %s' % host_ip, verbosity=1)
return host_ip
def get_generic_type(base_type, generic_base_type): # type: (t.Type, t.Type[TType]) -> t.Optional[t.Type[TType]]
"""Return the generic type arg derived from the generic_base_type type that is associated with the base_type type, if any, otherwise return None."""
# noinspection PyUnresolvedReferences
type_arg = t.get_args(base_type.__orig_bases__[0])[0]
return None if isinstance(type_arg, generic_base_type) else type_arg
def get_type_associations(base_type, generic_base_type): # type: (t.Type[TType], t.Type[TValue]) -> t.List[t.Tuple[t.Type[TValue], t.Type[TType]]]
"""Create and return a list of tuples associating generic_base_type derived types with a corresponding base_type derived type."""
return [item for item in [(get_generic_type(sc_type, generic_base_type), sc_type) for sc_type in get_subclasses(base_type)] if item[1]]
def get_type_map(base_type, generic_base_type): # type: (t.Type[TType], t.Type[TValue]) -> t.Dict[t.Type[TValue], t.Type[TType]]
"""Create and return a mapping of generic_base_type derived types to base_type derived types."""
return {item[0]: item[1] for item in get_type_associations(base_type, generic_base_type)}
def verify_sys_executable(path): # type: (str) -> t.Optional[str]
"""Verify that the given path references the current Python interpreter. If not, return the expected path, otherwise return None."""
if path == sys.executable:
return None
if os.path.realpath(path) == os.path.realpath(sys.executable):
return None
expected_executable = raw_command([path, '-c', 'import sys; print(sys.executable)'], capture=True)[0]
if expected_executable == sys.executable:
return None
return expected_executable
display = Display() # pylint: disable=locally-disabled, invalid-name
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,101 |
`ansible-vault encrypt_string --help` shows the `--output` option but it remains without effect.
|
### Summary
`ansible-vault encrypt_string --help` shows the `--output` option but it remains without effect.
### Issue Type
Bug Report
### Component Name
ansible-vault
### Ansible Version
```console
$ ansible --version
ansible 2.9.21
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/elavarde/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.9/site-packages/ansible
executable location = /usr/bin/ansible
python version = 3.9.5 (default, May 14 2021, 00:00:00) [GCC 11.1.1 20210428 (Red Hat 11.1.1-1)]
```
### Configuration
```console
$ ansible-config dump --only-changed
[... empty ...]
```
### OS / Environment
Fedora 34
### Steps to Reproduce
```shell
ansible-vault encrypt_string --ask-vault-pass --name myvar --output vault.yml 'some secret'
```
### Expected Results
Variable and its encrypted content are written to file and not to stdout _or_ the option `--output` isn't shown in the help.
Making the `--output` option work would probably be a duplicate of issue #59590, so removing the non-working `--output` option could be a simpler step. More important for now is that the behavior is consistent.
### Actual Results
```console
- output file isn't created/edited
- variable and its content are output to stdout
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75101
|
https://github.com/ansible/ansible/pull/76856
|
18ed2c64e57816f0a8ecd2fbda3e5538b45da5ea
|
f501b579e51c05565d852feb676fa38cddf3f2ed
| 2021-06-24T06:41:22Z |
python
| 2022-01-28T08:34:59Z |
changelogs/fragments/75101-ansible-vault-encrypt_string-output-file.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,101 |
`ansible-vault encrypt_string --help` shows the `--output` option but it remains without effect.
|
### Summary
`ansible-vault encrypt_string --help` shows the `--output` option but it remains without effect.
### Issue Type
Bug Report
### Component Name
ansible-vault
### Ansible Version
```console
$ ansible --version
ansible 2.9.21
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/elavarde/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.9/site-packages/ansible
executable location = /usr/bin/ansible
python version = 3.9.5 (default, May 14 2021, 00:00:00) [GCC 11.1.1 20210428 (Red Hat 11.1.1-1)]
```
### Configuration
```console
$ ansible-config dump --only-changed
[... empty ...]
```
### OS / Environment
Fedora 34
### Steps to Reproduce
```shell
ansible-vault encrypt_string --ask-vault-pass --name myvar --output vault.yml 'some secret'
```
### Expected Results
Variable and its encrypted content are written to file and not to stdout _or_ the option `--output` isn't shown in the help.
Making the `--output` option work would probably be a duplicate of issue #59590, so removing the non-working `--output` option could be a simpler step. More important for now is that the behavior is consistent.
### Actual Results
```console
- output file isn't created/edited
- variable and its content are output to stdout
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75101
|
https://github.com/ansible/ansible/pull/76856
|
18ed2c64e57816f0a8ecd2fbda3e5538b45da5ea
|
f501b579e51c05565d852feb676fa38cddf3f2ed
| 2021-06-24T06:41:22Z |
python
| 2022-01-28T08:34:59Z |
lib/ansible/cli/vault.py
|
#!/usr/bin/env python
# (c) 2014, James Tanner <[email protected]>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# PYTHON_ARGCOMPLETE_OK
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
from ansible.cli import CLI
import os
import sys
from ansible import constants as C
from ansible import context
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleOptionsError
from ansible.module_utils._text import to_text, to_bytes
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.vault import VaultEditor, VaultLib, match_encrypt_secret
from ansible.utils.display import Display
display = Display()
class VaultCLI(CLI):
''' can encrypt any structured data file used by Ansible.
This can include *group_vars/* or *host_vars/* inventory variables,
variables loaded by *include_vars* or *vars_files*, or variable files
passed on the ansible-playbook command line with *-e @file.yml* or *-e @file.json*.
Role variables and defaults are also included!
Because Ansible tasks, handlers, and other objects are data, these can also be encrypted with vault.
If you'd like to not expose what variables you are using, you can keep an individual task file entirely encrypted.
'''
name = 'ansible-vault'
FROM_STDIN = "stdin"
FROM_ARGS = "the command line args"
FROM_PROMPT = "the interactive prompt"
def __init__(self, args):
self.b_vault_pass = None
self.b_new_vault_pass = None
self.encrypt_string_read_stdin = False
self.encrypt_secret = None
self.encrypt_vault_id = None
self.new_encrypt_secret = None
self.new_encrypt_vault_id = None
super(VaultCLI, self).__init__(args)
def init_parser(self):
super(VaultCLI, self).init_parser(
desc="encryption/decryption utility for Ansible data files",
epilog="\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
)
common = opt_help.argparse.ArgumentParser(add_help=False)
opt_help.add_vault_options(common)
opt_help.add_verbosity_options(common)
subparsers = self.parser.add_subparsers(dest='action')
subparsers.required = True
output = opt_help.argparse.ArgumentParser(add_help=False)
output.add_argument('--output', default=None, dest='output_file',
help='output file name for encrypt or decrypt; use - for stdout',
type=opt_help.unfrack_path())
# For encrypting actions, we can also specify which of multiple vault ids should be used for encrypting
vault_id = opt_help.argparse.ArgumentParser(add_help=False)
vault_id.add_argument('--encrypt-vault-id', default=[], dest='encrypt_vault_id',
action='store', type=str,
help='the vault id used to encrypt (required if more than one vault-id is provided)')
create_parser = subparsers.add_parser('create', help='Create new vault encrypted file', parents=[vault_id, common])
create_parser.set_defaults(func=self.execute_create)
create_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
decrypt_parser = subparsers.add_parser('decrypt', help='Decrypt vault encrypted file', parents=[output, common])
decrypt_parser.set_defaults(func=self.execute_decrypt)
decrypt_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
edit_parser = subparsers.add_parser('edit', help='Edit vault encrypted file', parents=[vault_id, common])
edit_parser.set_defaults(func=self.execute_edit)
edit_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
view_parser = subparsers.add_parser('view', help='View vault encrypted file', parents=[common])
view_parser.set_defaults(func=self.execute_view)
view_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
encrypt_parser = subparsers.add_parser('encrypt', help='Encrypt YAML file', parents=[common, output, vault_id])
encrypt_parser.set_defaults(func=self.execute_encrypt)
encrypt_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
enc_str_parser = subparsers.add_parser('encrypt_string', help='Encrypt a string', parents=[common, output, vault_id])
enc_str_parser.set_defaults(func=self.execute_encrypt_string)
enc_str_parser.add_argument('args', help='String to encrypt', metavar='string_to_encrypt', nargs='*')
enc_str_parser.add_argument('-p', '--prompt', dest='encrypt_string_prompt',
action='store_true',
help="Prompt for the string to encrypt")
enc_str_parser.add_argument('--show-input', dest='show_string_input', default=False, action='store_true',
help='Do not hide input when prompted for the string to encrypt')
enc_str_parser.add_argument('-n', '--name', dest='encrypt_string_names',
action='append',
help="Specify the variable name")
enc_str_parser.add_argument('--stdin-name', dest='encrypt_string_stdin_name',
default=None,
help="Specify the variable name for stdin")
rekey_parser = subparsers.add_parser('rekey', help='Re-key a vault encrypted file', parents=[common, vault_id])
rekey_parser.set_defaults(func=self.execute_rekey)
rekey_new_group = rekey_parser.add_mutually_exclusive_group()
rekey_new_group.add_argument('--new-vault-password-file', default=None, dest='new_vault_password_file',
help="new vault password file for rekey", type=opt_help.unfrack_path())
rekey_new_group.add_argument('--new-vault-id', default=None, dest='new_vault_id', type=str,
help='the new vault identity to use for rekey')
rekey_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
def post_process_args(self, options):
options = super(VaultCLI, self).post_process_args(options)
display.verbosity = options.verbosity
if options.vault_ids:
for vault_id in options.vault_ids:
if u';' in vault_id:
raise AnsibleOptionsError("'%s' is not a valid vault id. The character ';' is not allowed in vault ids" % vault_id)
if getattr(options, 'output_file', None) and len(options.args) > 1:
raise AnsibleOptionsError("At most one input file may be used with the --output option")
if options.action == 'encrypt_string':
if '-' in options.args or not options.args or options.encrypt_string_stdin_name:
self.encrypt_string_read_stdin = True
# TODO: prompting from stdin and reading from stdin seem mutually exclusive, but verify that.
if options.encrypt_string_prompt and self.encrypt_string_read_stdin:
raise AnsibleOptionsError('The --prompt option is not supported if also reading input from stdin')
return options
def run(self):
super(VaultCLI, self).run()
loader = DataLoader()
# set default restrictive umask
old_umask = os.umask(0o077)
vault_ids = list(context.CLIARGS['vault_ids'])
# there are 3 types of actions, those that just 'read' (decrypt, view) and only
# need to ask for a password once, and those that 'write' (create, encrypt) that
# ask for a new password and confirm it, and 'read/write' (rekey) that asks for the
# old password, then asks for a new one and confirms it.
default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
vault_ids = default_vault_ids + vault_ids
action = context.CLIARGS['action']
# TODO: instead of prompting for these before, we could let VaultEditor
# call a callback when it needs it.
if action in ['decrypt', 'view', 'rekey', 'edit']:
vault_secrets = self.setup_vault_secrets(loader, vault_ids=vault_ids,
vault_password_files=list(context.CLIARGS['vault_password_files']),
ask_vault_pass=context.CLIARGS['ask_vault_pass'])
if not vault_secrets:
raise AnsibleOptionsError("A vault password is required to use Ansible's Vault")
if action in ['encrypt', 'encrypt_string', 'create']:
encrypt_vault_id = None
# no --encrypt-vault-id context.CLIARGS['encrypt_vault_id'] for 'edit'
if action not in ['edit']:
encrypt_vault_id = context.CLIARGS['encrypt_vault_id'] or C.DEFAULT_VAULT_ENCRYPT_IDENTITY
vault_secrets = None
vault_secrets = \
self.setup_vault_secrets(loader,
vault_ids=vault_ids,
vault_password_files=list(context.CLIARGS['vault_password_files']),
ask_vault_pass=context.CLIARGS['ask_vault_pass'],
create_new_password=True)
if len(vault_secrets) > 1 and not encrypt_vault_id:
raise AnsibleOptionsError("The vault-ids %s are available to encrypt. Specify the vault-id to encrypt with --encrypt-vault-id" %
','.join([x[0] for x in vault_secrets]))
if not vault_secrets:
raise AnsibleOptionsError("A vault password is required to use Ansible's Vault")
encrypt_secret = match_encrypt_secret(vault_secrets,
encrypt_vault_id=encrypt_vault_id)
# only one secret for encrypt for now, use the first vault_id and use its first secret
# TODO: exception if more than one?
self.encrypt_vault_id = encrypt_secret[0]
self.encrypt_secret = encrypt_secret[1]
if action in ['rekey']:
encrypt_vault_id = context.CLIARGS['encrypt_vault_id'] or C.DEFAULT_VAULT_ENCRYPT_IDENTITY
# print('encrypt_vault_id: %s' % encrypt_vault_id)
# print('default_encrypt_vault_id: %s' % default_encrypt_vault_id)
# new_vault_ids should only ever be one item, from
# load the default vault ids if we are using encrypt-vault-id
new_vault_ids = []
if encrypt_vault_id:
new_vault_ids = default_vault_ids
if context.CLIARGS['new_vault_id']:
new_vault_ids.append(context.CLIARGS['new_vault_id'])
new_vault_password_files = []
if context.CLIARGS['new_vault_password_file']:
new_vault_password_files.append(context.CLIARGS['new_vault_password_file'])
new_vault_secrets = \
self.setup_vault_secrets(loader,
vault_ids=new_vault_ids,
vault_password_files=new_vault_password_files,
ask_vault_pass=context.CLIARGS['ask_vault_pass'],
create_new_password=True)
if not new_vault_secrets:
raise AnsibleOptionsError("A new vault password is required to use Ansible's Vault rekey")
# There is only one new_vault_id currently and one new_vault_secret, or we
# use the id specified in --encrypt-vault-id
new_encrypt_secret = match_encrypt_secret(new_vault_secrets,
encrypt_vault_id=encrypt_vault_id)
self.new_encrypt_vault_id = new_encrypt_secret[0]
self.new_encrypt_secret = new_encrypt_secret[1]
loader.set_vault_secrets(vault_secrets)
# FIXME: do we need to create VaultEditor here? its not reused
vault = VaultLib(vault_secrets)
self.editor = VaultEditor(vault)
context.CLIARGS['func']()
# and restore umask
os.umask(old_umask)
def execute_encrypt(self):
''' encrypt the supplied file using the provided vault secret '''
if not context.CLIARGS['args'] and sys.stdin.isatty():
display.display("Reading plaintext input from stdin", stderr=True)
for f in context.CLIARGS['args'] or ['-']:
# Fixme: use the correct vau
self.editor.encrypt_file(f, self.encrypt_secret,
vault_id=self.encrypt_vault_id,
output_file=context.CLIARGS['output_file'])
if sys.stdout.isatty():
display.display("Encryption successful", stderr=True)
@staticmethod
def format_ciphertext_yaml(b_ciphertext, indent=None, name=None):
indent = indent or 10
block_format_var_name = ""
if name:
block_format_var_name = "%s: " % name
block_format_header = "%s!vault |" % block_format_var_name
lines = []
vault_ciphertext = to_text(b_ciphertext)
lines.append(block_format_header)
for line in vault_ciphertext.splitlines():
lines.append('%s%s' % (' ' * indent, line))
yaml_ciphertext = '\n'.join(lines)
return yaml_ciphertext
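# Example for the helper above (ciphertext shortened, default indent of 10):
#   format_ciphertext_yaml(b'$ANSIBLE_VAULT;1.1;AES256\n6134...', name='myvar')
# returns:
#   myvar: !vault |
#             $ANSIBLE_VAULT;1.1;AES256
#             6134...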
def execute_encrypt_string(self):
''' encrypt the supplied string using the provided vault secret '''
b_plaintext = None
# Holds tuples (the_text, the_source_of_the_string, the variable name if it's provided).
b_plaintext_list = []
# remove the non-option '-' arg (used to indicate 'read from stdin') from the candidate args so
# we don't add it to the plaintext list
args = [x for x in context.CLIARGS['args'] if x != '-']
# We can prompt and read input, or read from stdin, but not both.
if context.CLIARGS['encrypt_string_prompt']:
msg = "String to encrypt: "
name = None
name_prompt_response = display.prompt('Variable name (enter for no name): ')
# TODO: enforce var naming rules?
if name_prompt_response != "":
name = name_prompt_response
# TODO: could prompt for which vault_id to use for each plaintext string
# currently, it will just be the default
hide_input = not context.CLIARGS['show_string_input']
if hide_input:
msg = "String to encrypt (hidden): "
else:
msg = "String to encrypt:"
prompt_response = display.prompt(msg, private=hide_input)
if prompt_response == '':
raise AnsibleOptionsError('The plaintext provided from the prompt was empty, not encrypting')
b_plaintext = to_bytes(prompt_response)
b_plaintext_list.append((b_plaintext, self.FROM_PROMPT, name))
# read from stdin
if self.encrypt_string_read_stdin:
if sys.stdout.isatty():
display.display("Reading plaintext input from stdin. (ctrl-d to end input, twice if your content does not already have a newline)", stderr=True)
stdin_text = sys.stdin.read()
if stdin_text == '':
raise AnsibleOptionsError('stdin was empty, not encrypting')
if sys.stdout.isatty() and not stdin_text.endswith("\n"):
display.display("\n")
b_plaintext = to_bytes(stdin_text)
# defaults to None
name = context.CLIARGS['encrypt_string_stdin_name']
b_plaintext_list.append((b_plaintext, self.FROM_STDIN, name))
# use any leftover args as strings to encrypt
# Try to match args up to --name options
if context.CLIARGS.get('encrypt_string_names', False):
name_and_text_list = list(zip(context.CLIARGS['encrypt_string_names'], args))
# Some but not enough --name's to name each var
if len(args) > len(name_and_text_list):
# Trying to avoid ever showing the plaintext in the output, so this warning is vague to avoid that.
display.display('The number of --name options does not match the number of args.',
stderr=True)
display.display('The last named variable will be "%s". The rest will not have'
' names.' % context.CLIARGS['encrypt_string_names'][-1],
stderr=True)
# Add the rest of the args without specifying a name
for extra_arg in args[len(name_and_text_list):]:
name_and_text_list.append((None, extra_arg))
# if no --names are provided, just use the args without a name.
else:
name_and_text_list = [(None, x) for x in args]
# Convert the plaintext text objects to bytestrings and collect
for name_and_text in name_and_text_list:
name, plaintext = name_and_text
if plaintext == '':
raise AnsibleOptionsError('The plaintext provided from the command line args was empty, not encrypting')
b_plaintext = to_bytes(plaintext)
b_plaintext_list.append((b_plaintext, self.FROM_ARGS, name))
# TODO: specify vault_id per string?
# Format the encrypted strings and any corresponding stderr output
outputs = self._format_output_vault_strings(b_plaintext_list, vault_id=self.encrypt_vault_id)
for output in outputs:
err = output.get('err', None)
out = output.get('out', '')
if err:
sys.stderr.write(err)
print(out)
if sys.stdout.isatty():
display.display("Encryption successful", stderr=True)
# TODO: offer block or string ala eyaml
def _format_output_vault_strings(self, b_plaintext_list, vault_id=None):
# If we are only showing one item in the output, we don't need to include commented
# delimiters in the text
show_delimiter = False
if len(b_plaintext_list) > 1:
show_delimiter = True
# list of dicts {'out': '', 'err': ''}
output = []
# Encrypt the plaintext, and format it into a yaml block that can be pasted into a playbook.
# For more than one input, show some differentiating info in the stderr output so we can tell them
# apart. If we have a var name, we include that in the yaml
for index, b_plaintext_info in enumerate(b_plaintext_list):
# (the text itself, which input it came from, its name)
b_plaintext, src, name = b_plaintext_info
b_ciphertext = self.editor.encrypt_bytes(b_plaintext, self.encrypt_secret,
vault_id=vault_id)
# block formatting
yaml_text = self.format_ciphertext_yaml(b_ciphertext, name=name)
err_msg = None
if show_delimiter:
human_index = index + 1
if name:
err_msg = '# The encrypted version of variable ("%s", the string #%d from %s).\n' % (name, human_index, src)
else:
err_msg = '# The encrypted version of the string #%d from %s.\n' % (human_index, src)
output.append({'out': yaml_text, 'err': err_msg})
return output
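# A sketch of what this returns, one dict per input string (ciphertext shortened):
#   [{'out': 'myvar: !vault |\n          $ANSIBLE_VAULT;...', 'err': '# The encrypted version of ...\n'}]
# 'err' is None when only one string was supplied; execute_encrypt_string sends
# 'err' to stderr and 'out' to stdout.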
def execute_decrypt(self):
''' decrypt the supplied file using the provided vault secret '''
if not context.CLIARGS['args'] and sys.stdin.isatty():
display.display("Reading ciphertext input from stdin", stderr=True)
for f in context.CLIARGS['args'] or ['-']:
self.editor.decrypt_file(f, output_file=context.CLIARGS['output_file'])
if sys.stdout.isatty():
display.display("Decryption successful", stderr=True)
def execute_create(self):
''' create and open a file in an editor that will be encrypted with the provided vault secret when closed'''
if len(context.CLIARGS['args']) != 1:
raise AnsibleOptionsError("ansible-vault create can take only one filename argument")
self.editor.create_file(context.CLIARGS['args'][0], self.encrypt_secret,
vault_id=self.encrypt_vault_id)
def execute_edit(self):
''' open and decrypt an existing vaulted file in an editor, that will be encrypted again when closed'''
for f in context.CLIARGS['args']:
self.editor.edit_file(f)
def execute_view(self):
''' open, decrypt and view an existing vaulted file using a pager using the supplied vault secret '''
for f in context.CLIARGS['args']:
# Note: vault should return byte strings because it could encrypt
# and decrypt binary files. We are responsible for changing it to
# unicode here because we are displaying it and therefore can make
# the decision that the display doesn't have to be precisely what
# the input was (leave that to decrypt instead)
plaintext = self.editor.plaintext(f)
self.pager(to_text(plaintext))
def execute_rekey(self):
''' re-encrypt a vaulted file with a new secret, the previous secret is required '''
for f in context.CLIARGS['args']:
# FIXME: plumb in vault_id, use the default new_vault_secret for now
self.editor.rekey_file(f, self.new_encrypt_secret,
self.new_encrypt_vault_id)
display.display("Rekey successful", stderr=True)
def main(args=None):
VaultCLI.cli_executor(args)
if __name__ == '__main__':
main()
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,101 |
`ansible-vault encrypt_string --help` shows the `--output` option, but the option has no effect.
|
### Summary
`ansible-vault encrypt_string --help` shows the `--output` option, but the option has no effect.
### Issue Type
Bug Report
### Component Name
ansible-vault
### Ansible Version
```console
$ ansible --version
ansible 2.9.21
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/elavarde/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.9/site-packages/ansible
executable location = /usr/bin/ansible
python version = 3.9.5 (default, May 14 2021, 00:00:00) [GCC 11.1.1 20210428 (Red Hat 11.1.1-1)]
```
### Configuration
```console
$ ansible-config dump --only-changed
[... empty ...]
```
### OS / Environment
Fedora 34
### Steps to Reproduce
```shell
ansible-vault encrypt_string --ask-vault-pass --name myvar --output vault.yml 'some secret'
```
### Expected Results
Variable and its encrypted content are written to the file and not to stdout, _or_ the `--output` option isn't shown in the help.
Making the `--output` option work would probably be a duplicate of issue #59590, so removing the non-functional `--output` option could be the simpler step. More important for now is that the behavior is consistent.
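To illustrate, after the command above I would expect something like this in `vault.yml` (hypothetical output, ciphertext shortened):
```console
$ cat vault.yml
myvar: !vault |
          $ANSIBLE_VAULT;1.1;AES256
          6134...
```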
### Actual Results
```console
- output file isn't created/edited
- variable and its content are output to stdout
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75101
|
https://github.com/ansible/ansible/pull/76856
|
18ed2c64e57816f0a8ecd2fbda3e5538b45da5ea
|
f501b579e51c05565d852feb676fa38cddf3f2ed
| 2021-06-24T06:41:22Z |
python
| 2022-01-28T08:34:59Z |
test/integration/targets/ansible-vault/runme.sh
|
#!/usr/bin/env bash
set -euvx
source virtualenv.sh
MYTMPDIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir')
trap 'rm -rf "${MYTMPDIR}"' EXIT
# create a test file
TEST_FILE="${MYTMPDIR}/test_file"
echo "This is a test file" > "${TEST_FILE}"
TEST_FILE_1_2="${MYTMPDIR}/test_file_1_2"
echo "This is a test file for format 1.2" > "${TEST_FILE_1_2}"
TEST_FILE_ENC_PASSWORD="${MYTMPDIR}/test_file_enc_password"
echo "This is a test file for encrypted with a vault password that is itself vault encrypted" > "${TEST_FILE_ENC_PASSWORD}"
TEST_FILE_ENC_PASSWORD_DEFAULT="${MYTMPDIR}/test_file_enc_password_default"
echo "This is a test file for encrypted with a vault password that is itself vault encrypted using --encrypted-vault-id default" > "${TEST_FILE_ENC_PASSWORD_DEFAULT}"
TEST_FILE_OUTPUT="${MYTMPDIR}/test_file_output"
TEST_FILE_EDIT="${MYTMPDIR}/test_file_edit"
echo "This is a test file for edit" > "${TEST_FILE_EDIT}"
TEST_FILE_EDIT2="${MYTMPDIR}/test_file_edit2"
echo "This is a test file for edit2" > "${TEST_FILE_EDIT2}"
# test case for https://github.com/ansible/ansible/issues/35834
# (being prompted for new password on vault-edit with no configured passwords)
TEST_FILE_EDIT3="${MYTMPDIR}/test_file_edit3"
echo "This is a test file for edit3" > "${TEST_FILE_EDIT3}"
# ansible-config view
ansible-config view
# ansible-config
ansible-config dump --only-changed
ansible-vault encrypt "$@" --vault-id vault-password "${TEST_FILE_EDIT3}"
# EDITOR=./faux-editor.py ansible-vault edit "$@" "${TEST_FILE_EDIT3}"
EDITOR=./faux-editor.py ansible-vault edit --vault-id vault-password -vvvvv "${TEST_FILE_EDIT3}"
echo $?
# view the vault encrypted password file
ansible-vault view "$@" --vault-id vault-password encrypted-vault-password
# encrypt with a password from a vault encrypted password file and multiple vault-ids
# should fail because we don't know which vault id to use to encrypt with
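# note: the trailing '&& :' keeps 'set -e' from aborting on the expected non-zero
# exit status, so the rc can still be captured and asserted below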
ansible-vault encrypt "$@" --vault-id vault-password --vault-id encrypted-vault-password "${TEST_FILE_ENC_PASSWORD}" && :
WRONG_RC=$?
echo "rc was $WRONG_RC (5 is expected)"
[ $WRONG_RC -eq 5 ]
# try to view the file encrypted with the vault-password we didn't specify
# to verify we didn't choose the wrong vault-id
ansible-vault view "$@" --vault-id vault-password encrypted-vault-password
FORMAT_1_1_HEADER="\$ANSIBLE_VAULT;1.1;AES256"
FORMAT_1_2_HEADER="\$ANSIBLE_VAULT;1.2;AES256"
VAULT_PASSWORD_FILE=vault-password
# new format, view, using password client script
ansible-vault view "$@" --vault-id [email protected] format_1_1_AES256.yml
# view, using password client script, unknown vault/keyname
ansible-vault view "$@" --vault-id [email protected] format_1_1_AES256.yml && :
# Use linux setsid to test without a tty. No setsid if osx/bsd though...
if [ -x "$(command -v setsid)" ]; then
# tests related to https://github.com/ansible/ansible/issues/30993
CMD='ansible-playbook -i ../../inventory -vvvvv --ask-vault-pass test_vault.yml'
setsid sh -c "echo test-vault-password|${CMD}" < /dev/null > log 2>&1 && :
WRONG_RC=$?
cat log
echo "rc was $WRONG_RC (0 is expected)"
[ $WRONG_RC -eq 0 ]
setsid sh -c 'tty; ansible-vault view --ask-vault-pass -vvvvv test_vault.yml' < /dev/null > log 2>&1 && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
cat log
setsid sh -c 'tty; echo passbhkjhword|ansible-playbook -i ../../inventory -vvvvv --ask-vault-pass test_vault.yml' < /dev/null > log 2>&1 && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
cat log
setsid sh -c 'tty; echo test-vault-password |ansible-playbook -i ../../inventory -vvvvv --ask-vault-pass test_vault.yml' < /dev/null > log 2>&1
echo $?
cat log
setsid sh -c 'tty; echo test-vault-password|ansible-playbook -i ../../inventory -vvvvv --ask-vault-pass test_vault.yml' < /dev/null > log 2>&1
echo $?
cat log
setsid sh -c 'tty; echo test-vault-password |ansible-playbook -i ../../inventory -vvvvv --ask-vault-pass test_vault.yml' < /dev/null > log 2>&1
echo $?
cat log
setsid sh -c 'tty; echo test-vault-password|ansible-vault view --ask-vault-pass -vvvvv vaulted.inventory' < /dev/null > log 2>&1
echo $?
cat log
# test using --ask-vault-password option
CMD='ansible-playbook -i ../../inventory -vvvvv --ask-vault-password test_vault.yml'
setsid sh -c "echo test-vault-password|${CMD}" < /dev/null > log 2>&1 && :
WRONG_RC=$?
cat log
echo "rc was $WRONG_RC (0 is expected)"
[ $WRONG_RC -eq 0 ]
fi
ansible-vault view "$@" --vault-password-file vault-password-wrong format_1_1_AES256.yml && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
set -eux
# new format, view
ansible-vault view "$@" --vault-password-file vault-password format_1_1_AES256.yml
# new format, view with vault-id
ansible-vault view "$@" --vault-id=vault-password format_1_1_AES256.yml
# new format, view, using password script
ansible-vault view "$@" --vault-password-file password-script.py format_1_1_AES256.yml
# new format, view, using password script with vault-id
ansible-vault view "$@" --vault-id password-script.py format_1_1_AES256.yml
# new 1.2 format, view
ansible-vault view "$@" --vault-password-file vault-password format_1_2_AES256.yml
# new 1.2 format, view with vault-id
ansible-vault view "$@" --vault-id=test_vault_id@vault-password format_1_2_AES256.yml
# new 1.2 format, view, using password script
ansible-vault view "$@" --vault-password-file password-script.py format_1_2_AES256.yml
# new 1.2 format, view, using password script with vault-id
ansible-vault view "$@" --vault-id password-script.py format_1_2_AES256.yml
# newish 1.1 format, view, using a vault-id list from config env var
ANSIBLE_VAULT_IDENTITY_LIST='wrong-password@vault-password-wrong,default@vault-password' ansible-vault view "$@" --vault-id password-script.py format_1_1_AES256.yml
# new 1.2 format, view, ENFORCE_IDENTITY_MATCH=true, should fail, no 'test_vault_id' vault_id
ANSIBLE_VAULT_ID_MATCH=1 ansible-vault view "$@" --vault-password-file vault-password format_1_2_AES256.yml && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# new 1.2 format, view with vault-id, ENFORCE_IDENTITY_MATCH=true, should work, 'test_vault_id' is provided
ANSIBLE_VAULT_ID_MATCH=1 ansible-vault view "$@" --vault-id=test_vault_id@vault-password format_1_2_AES256.yml
# new 1.2 format, view, using password script, ENFORCE_IDENTITY_MATCH=true, should fail, no 'test_vault_id'
ANSIBLE_VAULT_ID_MATCH=1 ansible-vault view "$@" --vault-password-file password-script.py format_1_2_AES256.yml && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# new 1.2 format, view, using password script with vault-id, ENFORCE_IDENTITY_MATCH=true, should fail
ANSIBLE_VAULT_ID_MATCH=1 ansible-vault view "$@" --vault-id password-script.py format_1_2_AES256.yml && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# new 1.2 format, view, using password script with vault-id, ENFORCE_IDENTITY_MATCH=true, 'test_vault_id' provided should work
ANSIBLE_VAULT_ID_MATCH=1 ansible-vault view "$@" [email protected] format_1_2_AES256.yml
# test with a default vault password set via config/env, right password
ANSIBLE_VAULT_PASSWORD_FILE=vault-password ansible-vault view "$@" format_1_1_AES256.yml
# test with a default vault password set via config/env, wrong password
ANSIBLE_VAULT_PASSWORD_FILE=vault-password-wrong ansible-vault view "$@" format_1_1_AES.yml && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# test with a default vault-id list set via config/env, right password
ANSIBLE_VAULT_PASSWORD_FILE=wrong@vault-password-wrong,correct@vault-password ansible-vault view "$@" format_1_1_AES.yml && :
# test with a default vault-id list set via config/env,wrong passwords
ANSIBLE_VAULT_PASSWORD_FILE=wrong@vault-password-wrong,alsowrong@vault-password-wrong ansible-vault view "$@" format_1_1_AES.yml && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# try specifying a --encrypt-vault-id that doesn't exist; should exit with an error indicating
# the mismatch between --encrypt-vault-id and the known vault-ids
ansible-vault encrypt "$@" --vault-password-file vault-password --encrypt-vault-id doesnt_exist "${TEST_FILE}" && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# encrypt it
ansible-vault encrypt "$@" --vault-password-file vault-password "${TEST_FILE}"
ansible-vault view "$@" --vault-password-file vault-password "${TEST_FILE}"
# view with multiple vault-password files, including a wrong one
ansible-vault view "$@" --vault-password-file vault-password --vault-password-file vault-password-wrong "${TEST_FILE}"
# view with multiple vault-password files, including a wrong one, using vault-id
ansible-vault view "$@" --vault-id vault-password --vault-id vault-password-wrong "${TEST_FILE}"
# And with the password files specified in a different order
ansible-vault view "$@" --vault-password-file vault-password-wrong --vault-password-file vault-password "${TEST_FILE}"
# And with the password files specified in a different order, using vault-id
ansible-vault view "$@" --vault-id vault-password-wrong --vault-id vault-password "${TEST_FILE}"
# And with the password files specified in a different order, using --vault-id and non default vault_ids
ansible-vault view "$@" --vault-id test_vault_id@vault-password-wrong --vault-id test_vault_id@vault-password "${TEST_FILE}"
ansible-vault decrypt "$@" --vault-password-file vault-password "${TEST_FILE}"
# encrypt it, using a vault_id so we write a 1.2 format file
ansible-vault encrypt "$@" --vault-id test_vault_1_2@vault-password "${TEST_FILE_1_2}"
ansible-vault view "$@" --vault-id vault-password "${TEST_FILE_1_2}"
ansible-vault view "$@" --vault-id test_vault_1_2@vault-password "${TEST_FILE_1_2}"
# view with multiple vault-password files, including a wrong one
ansible-vault view "$@" --vault-id vault-password --vault-id wrong_password@vault-password-wrong "${TEST_FILE_1_2}"
# And with the password files specified in a different order, using vault-id
ansible-vault view "$@" --vault-id vault-password-wrong --vault-id vault-password "${TEST_FILE_1_2}"
# And with the password files specified in a different order, using --vault-id and non default vault_ids
ansible-vault view "$@" --vault-id test_vault_id@vault-password-wrong --vault-id test_vault_id@vault-password "${TEST_FILE_1_2}"
ansible-vault decrypt "$@" --vault-id test_vault_1_2@vault-password "${TEST_FILE_1_2}"
# multiple vault passwords
ansible-vault view "$@" --vault-password-file vault-password --vault-password-file vault-password-wrong format_1_1_AES256.yml
# multiple vault passwords, --vault-id
ansible-vault view "$@" --vault-id test_vault_id@vault-password --vault-id test_vault_id@vault-password-wrong format_1_1_AES256.yml
# encrypt it, with password from password script
ansible-vault encrypt "$@" --vault-password-file password-script.py "${TEST_FILE}"
ansible-vault view "$@" --vault-password-file password-script.py "${TEST_FILE}"
ansible-vault decrypt "$@" --vault-password-file password-script.py "${TEST_FILE}"
# encrypt it, with password from password script
ansible-vault encrypt "$@" --vault-id [email protected] "${TEST_FILE}"
ansible-vault view "$@" --vault-id [email protected] "${TEST_FILE}"
ansible-vault decrypt "$@" --vault-id [email protected] "${TEST_FILE}"
# new password file for rekeyed file
NEW_VAULT_PASSWORD="${MYTMPDIR}/new-vault-password"
echo "newpassword" > "${NEW_VAULT_PASSWORD}"
ansible-vault encrypt "$@" --vault-password-file vault-password "${TEST_FILE}"
ansible-vault rekey "$@" --vault-password-file vault-password --new-vault-password-file "${NEW_VAULT_PASSWORD}" "${TEST_FILE}"
# --new-vault-password-file and --new-vault-id should cause options error
ansible-vault rekey "$@" --vault-password-file vault-password --new-vault-id=foobar --new-vault-password-file "${NEW_VAULT_PASSWORD}" "${TEST_FILE}" && :
WRONG_RC=$?
echo "rc was $WRONG_RC (2 is expected)"
[ $WRONG_RC -eq 2 ]
ansible-vault view "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" "${TEST_FILE}"
# view file with unicode in filename
ansible-vault view "$@" --vault-password-file vault-password vault-café.yml
# view with old password file and new password file
ansible-vault view "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --vault-password-file vault-password "${TEST_FILE}"
# view with old password file and new password file, different order
ansible-vault view "$@" --vault-password-file vault-password --vault-password-file "${NEW_VAULT_PASSWORD}" "${TEST_FILE}"
# view with old password file and new password file and another wrong
ansible-vault view "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --vault-password-file vault-password-wrong --vault-password-file vault-password "${TEST_FILE}"
# view with old password file and new password file and another wrong, using --vault-id
ansible-vault view "$@" --vault-id "tmp_new_password@${NEW_VAULT_PASSWORD}" --vault-id wrong_password@vault-password-wrong --vault-id myorg@vault-password "${TEST_FILE}"
ansible-vault decrypt "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" "${TEST_FILE}"
# reading/writing to/from stdout/stdin (See https://github.com/ansible/ansible/issues/23567)
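# a bare '-' filename argument means read from stdin, and '--output=-' sends the result to stdout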
ansible-vault encrypt "$@" --vault-password-file "${VAULT_PASSWORD_FILE}" --output="${TEST_FILE_OUTPUT}" < "${TEST_FILE}"
OUTPUT=$(ansible-vault decrypt "$@" --vault-password-file "${VAULT_PASSWORD_FILE}" --output=- < "${TEST_FILE_OUTPUT}")
echo "${OUTPUT}" | grep 'This is a test file'
OUTPUT_DASH=$(ansible-vault decrypt "$@" --vault-password-file "${VAULT_PASSWORD_FILE}" --output=- "${TEST_FILE_OUTPUT}")
echo "${OUTPUT_DASH}" | grep 'This is a test file'
OUTPUT_DASH_SPACE=$(ansible-vault decrypt "$@" --vault-password-file "${VAULT_PASSWORD_FILE}" --output - "${TEST_FILE_OUTPUT}")
echo "${OUTPUT_DASH_SPACE}" | grep 'This is a test file'
# test using an empty vault password file
ansible-vault view "$@" --vault-password-file empty-password format_1_1_AES256.yml && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
ansible-vault view "$@" --vault-id=empty@empty-password --vault-password-file empty-password format_1_1_AES256.yml && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
echo 'foo' > some_file.txt
ansible-vault encrypt "$@" --vault-password-file empty-password some_file.txt && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" "a test string"
# Test with multiple vault password files
# https://github.com/ansible/ansible/issues/57172
env ANSIBLE_VAULT_PASSWORD_FILE=vault-password ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --encrypt-vault-id default "a test string"
ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --name "blippy" "a test string names blippy"
ansible-vault encrypt_string "$@" --vault-id "${NEW_VAULT_PASSWORD}" "a test string"
ansible-vault encrypt_string "$@" --vault-id "${NEW_VAULT_PASSWORD}" --name "blippy" "a test string names blippy"
# from stdin
ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" < "${TEST_FILE}"
ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --stdin-name "the_var_from_stdin" < "${TEST_FILE}"
# write to file
ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --name "blippy" "a test string names blippy" --output "${MYTMPDIR}/enc_string_test_file"
# test ansible-vault edit with a faux editor
ansible-vault encrypt "$@" --vault-password-file vault-password "${TEST_FILE_EDIT}"
# edit a 1.1 format with no vault-id, should stay 1.1
EDITOR=./faux-editor.py ansible-vault edit "$@" --vault-password-file vault-password "${TEST_FILE_EDIT}"
head -1 "${TEST_FILE_EDIT}" | grep "${FORMAT_1_1_HEADER}"
# edit a 1.1 format with vault-id, should stay 1.1
cat "${TEST_FILE_EDIT}"
EDITOR=./faux-editor.py ansible-vault edit "$@" --vault-id vault_password@vault-password "${TEST_FILE_EDIT}"
cat "${TEST_FILE_EDIT}"
head -1 "${TEST_FILE_EDIT}" | grep "${FORMAT_1_1_HEADER}"
ansible-vault encrypt "$@" --vault-id vault_password@vault-password "${TEST_FILE_EDIT2}"
# verify that we aren't prompted for a new vault password on edit if we are running interactively (ie, with prompts)
# have to use setsid and --ask-vault-pass to force a prompt to simulate one.
# See https://github.com/ansible/ansible/issues/35834
setsid sh -c 'tty; echo password |ansible-vault edit --ask-vault-pass vault_test.yml' < /dev/null > log 2>&1 && :
grep 'New Vault password' log && :
WRONG_RC=$?
echo "The stdout log had 'New Vault password' in it and it is not supposed to. rc of grep was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# edit a 1.2 format with vault id, should keep vault id and 1.2 format
EDITOR=./faux-editor.py ansible-vault edit "$@" --vault-id vault_password@vault-password "${TEST_FILE_EDIT2}"
head -1 "${TEST_FILE_EDIT2}" | grep "${FORMAT_1_2_HEADER};vault_password"
# edit a 1.2 file with no vault-id, should keep vault id and 1.2 format
EDITOR=./faux-editor.py ansible-vault edit "$@" --vault-password-file vault-password "${TEST_FILE_EDIT2}"
head -1 "${TEST_FILE_EDIT2}" | grep "${FORMAT_1_2_HEADER};vault_password"
# encrypt with a password from a vault encrypted password file and multiple vault-ids
# should fail because we don't know which vault id to use to encrypt with
ansible-vault encrypt "$@" --vault-id vault-password --vault-id encrypted-vault-password "${TEST_FILE_ENC_PASSWORD}" && :
WRONG_RC=$?
echo "rc was $WRONG_RC (5 is expected)"
[ $WRONG_RC -eq 5 ]
# encrypt with a password from a vault encrypted password file and multiple vault-ids
# but this time specify with --encrypt-vault-id, but specifying vault-id names (instead of default)
# ansible-vault encrypt "$@" --vault-id from_vault_password@vault-password --vault-id from_encrypted_vault_password@encrypted-vault-password --encrypt-vault-id from_encrypted_vault_password "${TEST_FILE(_ENC_PASSWORD}"
# try to view the file encrypted with the vault-password we didn't specify
# to verify we didn't choose the wrong vault-id
# ansible-vault view "$@" --vault-id vault-password "${TEST_FILE_ENC_PASSWORD}" && :
# WRONG_RC=$?
# echo "rc was $WRONG_RC (1 is expected)"
# [ $WRONG_RC -eq 1 ]
ansible-vault encrypt "$@" --vault-id vault-password "${TEST_FILE_ENC_PASSWORD}"
# view the file encrypted with a password from a vault encrypted password file
ansible-vault view "$@" --vault-id vault-password --vault-id encrypted-vault-password "${TEST_FILE_ENC_PASSWORD}"
# try to view the file encrypted with a password from a vault encrypted password file but without the password to the password file.
# This should fail with an error.
ansible-vault view "$@" --vault-id encrypted-vault-password "${TEST_FILE_ENC_PASSWORD}" && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# test playbooks using vaulted files
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password --list-tasks
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password --list-hosts
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password --syntax-check
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password
ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password --syntax-check
ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password
ansible-playbook test_vaulted_inventory.yml -i vaulted.inventory -v "$@" --vault-password-file vault-password
ansible-playbook test_vaulted_template.yml -i ../../inventory -v "$@" --vault-password-file vault-password
# test using --vault-pass-file option
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-pass-file vault-password
# install TOML for parse toml inventory
# test playbooks using vaulted files(toml)
pip install toml
ansible-vault encrypt ./inventory.toml -v "$@" --vault-password-file=./vault-password
ansible-playbook test_vaulted_inventory_toml.yml -i ./inventory.toml -v "$@" --vault-password-file vault-password
ansible-vault decrypt ./inventory.toml -v "$@" --vault-password-file=./vault-password
# test a playbook with a host_var whose value is non-ascii utf8 (see https://github.com/ansible/ansible/issues/37258)
ansible-playbook -i ../../inventory -v "$@" --vault-id vault-password test_vaulted_utf8_value.yml
# test with password from password script
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file password-script.py
ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file password-script.py
# with multiple password files
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password --vault-password-file vault-password-wrong
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-password-file vault-password
ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password --vault-password-file vault-password-wrong --syntax-check
ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-password-file vault-password
# test with a default vault password file set in config
ANSIBLE_VAULT_PASSWORD_FILE=vault-password ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong
# test using vault_identity_list config
ANSIBLE_VAULT_IDENTITY_LIST='wrong-password@vault-password-wrong,default@vault-password' ansible-playbook test_vault.yml -i ../../inventory -v "$@"
# test that we can have a vault encrypted yaml file that includes embedded vault vars
# that were encrypted with a different vault secret
ansible-playbook test_vault_file_encrypted_embedded.yml -i ../../inventory "$@" --vault-id encrypted_file_encrypted_var_password --vault-id vault-password
# with multiple password files, --vault-id, ordering
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id vault-password --vault-id vault-password-wrong
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id vault-password-wrong --vault-id vault-password
ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-id vault-password --vault-id vault-password-wrong --syntax-check
ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-id vault-password-wrong --vault-id vault-password
# test with multiple password files, including a script, and a wrong password
ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-password-file password-script.py --vault-password-file vault-password
# test with multiple password files, including a script, and a wrong password, and a mix of --vault-id and --vault-password-file
ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-id password-script.py --vault-id vault-password
# test with multiple password files, including a script, and a wrong password, and a mix of --vault-id and --vault-password-file
ansible-playbook test_vault_embedded_ids.yml -i ../../inventory -v "$@" \
--vault-password-file vault-password-wrong \
--vault-id password-script.py --vault-id example1@example1_password \
--vault-id example2@example2_password --vault-password-file example3_password \
--vault-id vault-password
# with wrong password
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# with multiple wrong passwords
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-password-file vault-password-wrong && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# with wrong password, --vault-id
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id vault-password-wrong && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# with multiple wrong passwords with --vault-id
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id vault-password-wrong --vault-id vault-password-wrong && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# with multiple wrong passwords with --vault-id
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id wrong1@vault-password-wrong --vault-id wrong2@vault-password-wrong && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# with empty password file
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id empty@empty-password && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# test invalid format ala https://github.com/ansible/ansible/issues/28038
EXPECTED_ERROR='Vault format unhexlify error: Non-hexadecimal digit found'
ansible-playbook "$@" -i invalid_format/inventory --vault-id invalid_format/vault-secret invalid_format/broken-host-vars-tasks.yml 2>&1 | grep "${EXPECTED_ERROR}"
EXPECTED_ERROR='Vault format unhexlify error: Odd-length string'
ansible-playbook "$@" -i invalid_format/inventory --vault-id invalid_format/vault-secret invalid_format/broken-group-vars-tasks.yml 2>&1 | grep "${EXPECTED_ERROR}"
# Run playbook with vault file with unicode in filename (https://github.com/ansible/ansible/issues/50316)
ansible-playbook -i ../../inventory -v "$@" --vault-password-file vault-password test_utf8_value_in_filename.yml
# Ensure we don't leave unencrypted temp files dangling
ansible-playbook -v "$@" --vault-password-file vault-password test_dangling_temp.yml
ansible-playbook "$@" --vault-password-file vault-password single_vault_as_string.yml
# Test that only one accessible vault password is required
export ANSIBLE_VAULT_IDENTITY_LIST="id1@./nonexistent, id2@${MYTMPDIR}/unreadable, id3@./vault-password"
touch "${MYTMPDIR}/unreadable"
sudo chmod 000 "${MYTMPDIR}/unreadable"
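# of the three identities above, id1 points at a missing file and id2 at an
# unreadable one; only id3 is usable, so the next two commands should still succeed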
ansible-vault encrypt_string content
ansible-vault encrypt_string content --encrypt-vault-id id3
set +e
# Try to use a missing vault password file
ansible-vault encrypt_string content --encrypt-vault-id id1 2>&1 | tee out.txt
test $? -ne 0
grep out.txt -e '\[WARNING\]: Error getting vault password file (id1)'
grep out.txt -e "ERROR! Did not find a match for --encrypt-vault-id=id1 in the known vault-ids \['id3'\]"
# Try to use an inaccessible vault password file
ansible-vault encrypt_string content --encrypt-vault-id id2 2>&1 | tee out.txt
test $? -ne 0
grep out.txt -e "[WARNING]: Error in vault password file loading (id2)"
grep out.txt -e "ERROR! Did not find a match for --encrypt-vault-id=id2 in the known vault-ids ['id3']"
set -e
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 59,590 |
Add support for ansible-vault encrypt_string to write results to file more easily
|
<!--- Verify first that your feature was not already discussed on GitHub -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
<!--- Describe the new feature/improvement briefly below -->
Whenever I use `ansible-vault encrypt_string`, I want to take the result and add it to a variable file; however, I have to manually select the text, copy it, open the file for editing, paste, and save the change. This process is painful, especially when you have multiple strings to encrypt, as it doesn't fit the Unix way of doing things and stays very manual. I thought `ansible-vault encrypt_string --output` might be something to help here, but the help documentation is confusing as it appears this is only used with the `encrypt` and `decrypt` _(commands)_.
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
lib/ansible/cli/vault.py
##### ADDITIONAL INFORMATION
<!--- Describe how the feature would be used, why it is needed and what it would solve -->
The only way I can see this working _without coming up with custom file descriptors or repurposing stderr for encrypted output so redirection will work_ is to add a new option that could append the resulting string to a file.
```shell
ansible-vault encrypt_string --encrypt-vault-id default --stdin-name some_var --append vars_file.yml
Reading plaintext input from stdin. (ctrl-d to end input)
hunter22
Encryption successful
```
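As a possible interim workaround (a sketch, assuming the encrypted YAML goes to stdout while status and prompt messages go to stderr, as they appear to today), plain shell redirection can already append the result; `some_var` is just an illustrative variable name:
```shell
ansible-vault encrypt_string --encrypt-vault-id default --stdin-name some_var >> vars_file.yml
```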
|
https://github.com/ansible/ansible/issues/59590
|
https://github.com/ansible/ansible/pull/76856
|
18ed2c64e57816f0a8ecd2fbda3e5538b45da5ea
|
f501b579e51c05565d852feb676fa38cddf3f2ed
| 2019-07-25T13:30:25Z |
python
| 2022-01-28T08:34:59Z |
changelogs/fragments/75101-ansible-vault-encrypt_string-output-file.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 59,590 |
Add support for ansible-vault encrypt_string to write results to file more easily
|
<!--- Verify first that your feature was not already discussed on GitHub -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
<!--- Describe the new feature/improvement briefly below -->
Whenever I use `ansible-vault encrypt_string`, I want to take the result and add it to a variable file; however, I have to manually select the text, copy it, open the file for editing, paste, and save the change. This process is painful, especially when you have multiple strings to encrypt, as it doesn't fit the Unix way of doing things and stays very manual. I thought `ansible-vault encrypt_string --output` might be something to help here, but the help documentation is confusing as it appears this is only used with the `encrypt` and `decrypt` _(commands)_.
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
lib/ansible/cli/vault.py
##### ADDITIONAL INFORMATION
<!--- Describe how the feature would be used, why it is needed and what it would solve -->
The only way I can see this working _without coming up with custom file descriptors or repurposing stderr for encrypted output so redirection will work_ is to add a new option that could append the resulting string to a file.
```shell
ansible-vault encrypt_string --encrypt-vault-id default --stdin-name some_var --append vars_file.yml
Reading plaintext input from stdin. (ctrl-d to end input)
hunter22
Encryption successful
```
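As a possible interim workaround (a sketch, assuming the encrypted YAML goes to stdout while status and prompt messages go to stderr, as they appear to today), plain shell redirection can already append the result; `some_var` is just an illustrative variable name:
```shell
ansible-vault encrypt_string --encrypt-vault-id default --stdin-name some_var >> vars_file.yml
```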
|
https://github.com/ansible/ansible/issues/59590
|
https://github.com/ansible/ansible/pull/76856
|
18ed2c64e57816f0a8ecd2fbda3e5538b45da5ea
|
f501b579e51c05565d852feb676fa38cddf3f2ed
| 2019-07-25T13:30:25Z |
python
| 2022-01-28T08:34:59Z |
lib/ansible/cli/vault.py
|
#!/usr/bin/env python
# (c) 2014, James Tanner <[email protected]>
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# PYTHON_ARGCOMPLETE_OK
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# ansible.cli needs to be imported first, to ensure the source bin/* scripts run that code first
from ansible.cli import CLI
import os
import sys
from ansible import constants as C
from ansible import context
from ansible.cli.arguments import option_helpers as opt_help
from ansible.errors import AnsibleOptionsError
from ansible.module_utils._text import to_text, to_bytes
from ansible.parsing.dataloader import DataLoader
from ansible.parsing.vault import VaultEditor, VaultLib, match_encrypt_secret
from ansible.utils.display import Display
display = Display()
class VaultCLI(CLI):
''' can encrypt any structured data file used by Ansible.
This can include *group_vars/* or *host_vars/* inventory variables,
variables loaded by *include_vars* or *vars_files*, or variable files
passed on the ansible-playbook command line with *-e @file.yml* or *-e @file.json*.
Role variables and defaults are also included!
Because Ansible tasks, handlers, and other objects are data, these can also be encrypted with vault.
If you'd like to not expose what variables you are using, you can keep an individual task file entirely encrypted.
'''
name = 'ansible-vault'
FROM_STDIN = "stdin"
FROM_ARGS = "the command line args"
FROM_PROMPT = "the interactive prompt"
def __init__(self, args):
self.b_vault_pass = None
self.b_new_vault_pass = None
self.encrypt_string_read_stdin = False
self.encrypt_secret = None
self.encrypt_vault_id = None
self.new_encrypt_secret = None
self.new_encrypt_vault_id = None
super(VaultCLI, self).__init__(args)
def init_parser(self):
super(VaultCLI, self).init_parser(
desc="encryption/decryption utility for Ansible data files",
epilog="\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
)
common = opt_help.argparse.ArgumentParser(add_help=False)
opt_help.add_vault_options(common)
opt_help.add_verbosity_options(common)
subparsers = self.parser.add_subparsers(dest='action')
subparsers.required = True
output = opt_help.argparse.ArgumentParser(add_help=False)
output.add_argument('--output', default=None, dest='output_file',
help='output file name for encrypt or decrypt; use - for stdout',
type=opt_help.unfrack_path())
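# Note: this shared parser is attached via parents=[...] below, so the
# decrypt, encrypt, and encrypt_string subcommands all accept --output.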
# For encrypting actions, we can also specify which of multiple vault ids should be used for encrypting
vault_id = opt_help.argparse.ArgumentParser(add_help=False)
vault_id.add_argument('--encrypt-vault-id', default=[], dest='encrypt_vault_id',
action='store', type=str,
help='the vault id used to encrypt (required if more than one vault-id is provided)')
create_parser = subparsers.add_parser('create', help='Create new vault encrypted file', parents=[vault_id, common])
create_parser.set_defaults(func=self.execute_create)
create_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
decrypt_parser = subparsers.add_parser('decrypt', help='Decrypt vault encrypted file', parents=[output, common])
decrypt_parser.set_defaults(func=self.execute_decrypt)
decrypt_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
edit_parser = subparsers.add_parser('edit', help='Edit vault encrypted file', parents=[vault_id, common])
edit_parser.set_defaults(func=self.execute_edit)
edit_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
view_parser = subparsers.add_parser('view', help='View vault encrypted file', parents=[common])
view_parser.set_defaults(func=self.execute_view)
view_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
encrypt_parser = subparsers.add_parser('encrypt', help='Encrypt YAML file', parents=[common, output, vault_id])
encrypt_parser.set_defaults(func=self.execute_encrypt)
encrypt_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
enc_str_parser = subparsers.add_parser('encrypt_string', help='Encrypt a string', parents=[common, output, vault_id])
enc_str_parser.set_defaults(func=self.execute_encrypt_string)
enc_str_parser.add_argument('args', help='String to encrypt', metavar='string_to_encrypt', nargs='*')
enc_str_parser.add_argument('-p', '--prompt', dest='encrypt_string_prompt',
action='store_true',
help="Prompt for the string to encrypt")
enc_str_parser.add_argument('--show-input', dest='show_string_input', default=False, action='store_true',
help='Do not hide input when prompted for the string to encrypt')
enc_str_parser.add_argument('-n', '--name', dest='encrypt_string_names',
action='append',
help="Specify the variable name")
enc_str_parser.add_argument('--stdin-name', dest='encrypt_string_stdin_name',
default=None,
help="Specify the variable name for stdin")
rekey_parser = subparsers.add_parser('rekey', help='Re-key a vault encrypted file', parents=[common, vault_id])
rekey_parser.set_defaults(func=self.execute_rekey)
rekey_new_group = rekey_parser.add_mutually_exclusive_group()
rekey_new_group.add_argument('--new-vault-password-file', default=None, dest='new_vault_password_file',
help="new vault password file for rekey", type=opt_help.unfrack_path())
rekey_new_group.add_argument('--new-vault-id', default=None, dest='new_vault_id', type=str,
help='the new vault identity to use for rekey')
rekey_parser.add_argument('args', help='Filename', metavar='file_name', nargs='*')
def post_process_args(self, options):
options = super(VaultCLI, self).post_process_args(options)
display.verbosity = options.verbosity
if options.vault_ids:
for vault_id in options.vault_ids:
if u';' in vault_id:
raise AnsibleOptionsError("'%s' is not a valid vault id. The character ';' is not allowed in vault ids" % vault_id)
if getattr(options, 'output_file', None) and len(options.args) > 1:
raise AnsibleOptionsError("At most one input file may be used with the --output option")
if options.action == 'encrypt_string':
if '-' in options.args or not options.args or options.encrypt_string_stdin_name:
self.encrypt_string_read_stdin = True
# TODO: prompting from stdin and reading from stdin seem mutually exclusive, but verify that.
if options.encrypt_string_prompt and self.encrypt_string_read_stdin:
raise AnsibleOptionsError('The --prompt option is not supported if also reading input from stdin')
return options
def run(self):
super(VaultCLI, self).run()
loader = DataLoader()
# set default restrictive umask
old_umask = os.umask(0o077)
vault_ids = list(context.CLIARGS['vault_ids'])
# there are 3 types of actions, those that just 'read' (decrypt, view) and only
# need to ask for a password once, and those that 'write' (create, encrypt) that
# ask for a new password and confirm it, and 'read/write' (rekey) that asks for the
# old password, then asks for a new one and confirms it.
default_vault_ids = C.DEFAULT_VAULT_IDENTITY_LIST
vault_ids = default_vault_ids + vault_ids
action = context.CLIARGS['action']
# TODO: instead of prompting for these before, we could let VaultEditor
# call a callback when it needs it.
if action in ['decrypt', 'view', 'rekey', 'edit']:
vault_secrets = self.setup_vault_secrets(loader, vault_ids=vault_ids,
vault_password_files=list(context.CLIARGS['vault_password_files']),
ask_vault_pass=context.CLIARGS['ask_vault_pass'])
if not vault_secrets:
raise AnsibleOptionsError("A vault password is required to use Ansible's Vault")
if action in ['encrypt', 'encrypt_string', 'create']:
encrypt_vault_id = None
# no --encrypt-vault-id context.CLIARGS['encrypt_vault_id'] for 'edit'
if action not in ['edit']:
encrypt_vault_id = context.CLIARGS['encrypt_vault_id'] or C.DEFAULT_VAULT_ENCRYPT_IDENTITY
vault_secrets = None
vault_secrets = \
self.setup_vault_secrets(loader,
vault_ids=vault_ids,
vault_password_files=list(context.CLIARGS['vault_password_files']),
ask_vault_pass=context.CLIARGS['ask_vault_pass'],
create_new_password=True)
if len(vault_secrets) > 1 and not encrypt_vault_id:
raise AnsibleOptionsError("The vault-ids %s are available to encrypt. Specify the vault-id to encrypt with --encrypt-vault-id" %
','.join([x[0] for x in vault_secrets]))
if not vault_secrets:
raise AnsibleOptionsError("A vault password is required to use Ansible's Vault")
encrypt_secret = match_encrypt_secret(vault_secrets,
encrypt_vault_id=encrypt_vault_id)
# only one secret for encrypt for now, use the first vault_id and use its first secret
# TODO: exception if more than one?
self.encrypt_vault_id = encrypt_secret[0]
self.encrypt_secret = encrypt_secret[1]
if action in ['rekey']:
encrypt_vault_id = context.CLIARGS['encrypt_vault_id'] or C.DEFAULT_VAULT_ENCRYPT_IDENTITY
# print('encrypt_vault_id: %s' % encrypt_vault_id)
# print('default_encrypt_vault_id: %s' % default_encrypt_vault_id)
# new_vault_ids should only ever be one item, from
# load the default vault ids if we are using encrypt-vault-id
new_vault_ids = []
if encrypt_vault_id:
new_vault_ids = default_vault_ids
if context.CLIARGS['new_vault_id']:
new_vault_ids.append(context.CLIARGS['new_vault_id'])
new_vault_password_files = []
if context.CLIARGS['new_vault_password_file']:
new_vault_password_files.append(context.CLIARGS['new_vault_password_file'])
new_vault_secrets = \
self.setup_vault_secrets(loader,
vault_ids=new_vault_ids,
vault_password_files=new_vault_password_files,
ask_vault_pass=context.CLIARGS['ask_vault_pass'],
create_new_password=True)
if not new_vault_secrets:
raise AnsibleOptionsError("A new vault password is required to use Ansible's Vault rekey")
# There is only one new_vault_id currently and one new_vault_secret, or we
# use the id specified in --encrypt-vault-id
new_encrypt_secret = match_encrypt_secret(new_vault_secrets,
encrypt_vault_id=encrypt_vault_id)
self.new_encrypt_vault_id = new_encrypt_secret[0]
self.new_encrypt_secret = new_encrypt_secret[1]
loader.set_vault_secrets(vault_secrets)
# FIXME: do we need to create VaultEditor here? it's not reused
vault = VaultLib(vault_secrets)
self.editor = VaultEditor(vault)
context.CLIARGS['func']()
# and restore umask
os.umask(old_umask)
def execute_encrypt(self):
''' encrypt the supplied file using the provided vault secret '''
if not context.CLIARGS['args'] and sys.stdin.isatty():
display.display("Reading plaintext input from stdin", stderr=True)
for f in context.CLIARGS['args'] or ['-']:
# Fixme: use the correct vau
self.editor.encrypt_file(f, self.encrypt_secret,
vault_id=self.encrypt_vault_id,
output_file=context.CLIARGS['output_file'])
if sys.stdout.isatty():
display.display("Encryption successful", stderr=True)
@staticmethod
def format_ciphertext_yaml(b_ciphertext, indent=None, name=None):
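# Wrap the vault ciphertext in a YAML block scalar: an optional '<name>: '
# prefix, a '!vault |' header, and the ciphertext lines indented underneath,
# so the result can be pasted straight into a playbook or vars file.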
indent = indent or 10
block_format_var_name = ""
if name:
block_format_var_name = "%s: " % name
block_format_header = "%s!vault |" % block_format_var_name
lines = []
vault_ciphertext = to_text(b_ciphertext)
lines.append(block_format_header)
for line in vault_ciphertext.splitlines():
lines.append('%s%s' % (' ' * indent, line))
yaml_ciphertext = '\n'.join(lines)
return yaml_ciphertext
def execute_encrypt_string(self):
''' encrypt the supplied string using the provided vault secret '''
b_plaintext = None
# Holds tuples (the_text, the_source_of_the_string, the variable name if its provided).
b_plaintext_list = []
# remove the non-option '-' arg (used to indicate 'read from stdin') from the candidate args so
# we don't add it to the plaintext list
args = [x for x in context.CLIARGS['args'] if x != '-']
# We can prompt and read input, or read from stdin, but not both.
if context.CLIARGS['encrypt_string_prompt']:
msg = "String to encrypt: "
name = None
name_prompt_response = display.prompt('Variable name (enter for no name): ')
# TODO: enforce var naming rules?
if name_prompt_response != "":
name = name_prompt_response
# TODO: could prompt for which vault_id to use for each plaintext string
# currently, it will just be the default
hide_input = not context.CLIARGS['show_string_input']
if hide_input:
msg = "String to encrypt (hidden): "
else:
msg = "String to encrypt:"
prompt_response = display.prompt(msg, private=hide_input)
if prompt_response == '':
raise AnsibleOptionsError('The plaintext provided from the prompt was empty, not encrypting')
b_plaintext = to_bytes(prompt_response)
b_plaintext_list.append((b_plaintext, self.FROM_PROMPT, name))
# read from stdin
if self.encrypt_string_read_stdin:
if sys.stdout.isatty():
display.display("Reading plaintext input from stdin. (ctrl-d to end input, twice if your content does not already have a newline)", stderr=True)
stdin_text = sys.stdin.read()
if stdin_text == '':
raise AnsibleOptionsError('stdin was empty, not encrypting')
if sys.stdout.isatty() and not stdin_text.endswith("\n"):
display.display("\n")
b_plaintext = to_bytes(stdin_text)
# defaults to None
name = context.CLIARGS['encrypt_string_stdin_name']
b_plaintext_list.append((b_plaintext, self.FROM_STDIN, name))
# use any leftover args as strings to encrypt
# Try to match args up to --name options
if context.CLIARGS.get('encrypt_string_names', False):
name_and_text_list = list(zip(context.CLIARGS['encrypt_string_names'], args))
# Some but not enough --name's to name each var
if len(args) > len(name_and_text_list):
# Trying to avoid ever showing the plaintext in the output, so this warning is vague to avoid that.
display.display('The number of --name options do not match the number of args.',
stderr=True)
display.display('The last named variable will be "%s". The rest will not have'
' names.' % context.CLIARGS['encrypt_string_names'][-1],
stderr=True)
# Add the rest of the args without specifying a name
for extra_arg in args[len(name_and_text_list):]:
name_and_text_list.append((None, extra_arg))
# if no --names are provided, just use the args without a name.
else:
name_and_text_list = [(None, x) for x in args]
# Convert the plaintext text objects to bytestrings and collect
for name_and_text in name_and_text_list:
name, plaintext = name_and_text
if plaintext == '':
raise AnsibleOptionsError('The plaintext provided from the command line args was empty, not encrypting')
b_plaintext = to_bytes(plaintext)
b_plaintext_list.append((b_plaintext, self.FROM_ARGS, name))
# TODO: specify vault_id per string?
# Format the encrypted strings and any corresponding stderr output
outputs = self._format_output_vault_strings(b_plaintext_list, vault_id=self.encrypt_vault_id)
for output in outputs:
err = output.get('err', None)
out = output.get('out', '')
if err:
sys.stderr.write(err)
print(out)
if sys.stdout.isatty():
display.display("Encryption successful", stderr=True)
# TODO: offer block or string ala eyaml
def _format_output_vault_strings(self, b_plaintext_list, vault_id=None):
# If we are only showing one item in the output, we don't need to include commented
# delimiters in the text
show_delimiter = False
if len(b_plaintext_list) > 1:
show_delimiter = True
# list of dicts {'out': '', 'err': ''}
output = []
# Encrypt the plaintext, and format it into a yaml block that can be pasted into a playbook.
# For more than one input, show some differentiating info in the stderr output so we can tell them
# apart. If we have a var name, we include that in the yaml
for index, b_plaintext_info in enumerate(b_plaintext_list):
# (the text itself, which input it came from, its name)
b_plaintext, src, name = b_plaintext_info
b_ciphertext = self.editor.encrypt_bytes(b_plaintext, self.encrypt_secret,
vault_id=vault_id)
# block formatting
yaml_text = self.format_ciphertext_yaml(b_ciphertext, name=name)
err_msg = None
if show_delimiter:
human_index = index + 1
if name:
err_msg = '# The encrypted version of variable ("%s", the string #%d from %s).\n' % (name, human_index, src)
else:
err_msg = '# The encrypted version of the string #%d from %s.)\n' % (human_index, src)
output.append({'out': yaml_text, 'err': err_msg})
return output
def execute_decrypt(self):
''' decrypt the supplied file using the provided vault secret '''
if not context.CLIARGS['args'] and sys.stdin.isatty():
display.display("Reading ciphertext input from stdin", stderr=True)
for f in context.CLIARGS['args'] or ['-']:
self.editor.decrypt_file(f, output_file=context.CLIARGS['output_file'])
if sys.stdout.isatty():
display.display("Decryption successful", stderr=True)
def execute_create(self):
''' create and open a file in an editor that will be encrypted with the provided vault secret when closed'''
if len(context.CLIARGS['args']) != 1:
raise AnsibleOptionsError("ansible-vault create can take only one filename argument")
self.editor.create_file(context.CLIARGS['args'][0], self.encrypt_secret,
vault_id=self.encrypt_vault_id)
def execute_edit(self):
''' open and decrypt an existing vaulted file in an editor, that will be encrypted again when closed'''
for f in context.CLIARGS['args']:
self.editor.edit_file(f)
def execute_view(self):
''' open, decrypt and view an existing vaulted file using a pager using the supplied vault secret '''
for f in context.CLIARGS['args']:
# Note: vault should return byte strings because it could encrypt
# and decrypt binary files. We are responsible for changing it to
# unicode here because we are displaying it and therefore can make
# the decision that the display doesn't have to be precisely what
# the input was (leave that to decrypt instead)
plaintext = self.editor.plaintext(f)
self.pager(to_text(plaintext))
def execute_rekey(self):
''' re-encrypt a vaulted file with a new secret; the previous secret is required '''
for f in context.CLIARGS['args']:
# FIXME: plumb in vault_id, use the default new_vault_secret for now
self.editor.rekey_file(f, self.new_encrypt_secret,
self.new_encrypt_vault_id)
display.display("Rekey successful", stderr=True)
def main(args=None):
VaultCLI.cli_executor(args)
if __name__ == '__main__':
main()
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 59,590 |
Add support for ansible-vault encrypt_string to write results to file more easily
|
<!--- Verify first that your feature was not already discussed on GitHub -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
<!--- Describe the new feature/improvement briefly below -->
Whenever I use `ansible-vault encrypt_string`, I want to take the result and add it to a variable file; however, I have to manually select the text, copy it, open the file for editing, paste, and save the change. This process is painful, especially when you have multiple strings to encrypt, because it doesn't fit the Unix way of doing things and stays entirely manual. I thought `ansible-vault encrypt_string --output` might help here, but the help documentation is confusing, as it appears this option is only used with the `encrypt` and `decrypt` commands.
##### ISSUE TYPE
- Feature Idea
##### COMPONENT NAME
<!--- Write the short name of the module, plugin, task or feature below, use your best guess if unsure -->
lib/ansible/cli/vault.py
##### ADDITIONAL INFORMATION
<!--- Describe how the feature would be used, why it is needed and what it would solve -->
The only way I can see this working _(without coming up with custom file descriptors or repurposing stderr for the encrypted output so redirection would work)_ is to add a new option that appends the resulting string to a file.
```shell
ansible-vault encrypt_string --encrypt-vault-id default --stdin-name --append vars_file.yml
Reading plaintext input from stdin. (ctrl-d to end input)
hunter22
Encryption successful
```
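As a point of reference, the encrypted YAML already goes to stdout while the status messages go to stderr (see `execute_encrypt_string` above), so the result can be captured and appended today. Below is a minimal workaround sketch; the helper name and the file paths are illustrative assumptions, not anything from the linked PR:

```python
# Sketch only: wraps the existing CLI. The helper name, vars file, and
# password file here are hypothetical, not part of ansible itself.
import subprocess

def append_encrypted_var(name, plaintext, vars_file, password_file):
    result = subprocess.run(
        ["ansible-vault", "encrypt_string",
         "--vault-password-file", password_file,
         "--name", name, plaintext],
        capture_output=True, text=True, check=True,
    )
    # only the vaulted YAML is on stdout; the status chatter went to stderr
    with open(vars_file, "a") as f:
        f.write(result.stdout)

append_encrypted_var("db_password", "hunter22", "vars_file.yml", "vault-password")
```

One caveat with this approach: passing the plaintext as a CLI argument exposes it to the process list, which a real `--append` option reading from stdin would avoid.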
|
https://github.com/ansible/ansible/issues/59590
|
https://github.com/ansible/ansible/pull/76856
|
18ed2c64e57816f0a8ecd2fbda3e5538b45da5ea
|
f501b579e51c05565d852feb676fa38cddf3f2ed
| 2019-07-25T13:30:25Z |
python
| 2022-01-28T08:34:59Z |
test/integration/targets/ansible-vault/runme.sh
|
#!/usr/bin/env bash
set -euvx
source virtualenv.sh
MYTMPDIR=$(mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir')
trap 'rm -rf "${MYTMPDIR}"' EXIT
# create a test file
TEST_FILE="${MYTMPDIR}/test_file"
echo "This is a test file" > "${TEST_FILE}"
TEST_FILE_1_2="${MYTMPDIR}/test_file_1_2"
echo "This is a test file for format 1.2" > "${TEST_FILE_1_2}"
TEST_FILE_ENC_PASSWORD="${MYTMPDIR}/test_file_enc_password"
echo "This is a test file for encrypted with a vault password that is itself vault encrypted" > "${TEST_FILE_ENC_PASSWORD}"
TEST_FILE_ENC_PASSWORD_DEFAULT="${MYTMPDIR}/test_file_enc_password_default"
echo "This is a test file for encrypted with a vault password that is itself vault encrypted using --encrypted-vault-id default" > "${TEST_FILE_ENC_PASSWORD_DEFAULT}"
TEST_FILE_OUTPUT="${MYTMPDIR}/test_file_output"
TEST_FILE_EDIT="${MYTMPDIR}/test_file_edit"
echo "This is a test file for edit" > "${TEST_FILE_EDIT}"
TEST_FILE_EDIT2="${MYTMPDIR}/test_file_edit2"
echo "This is a test file for edit2" > "${TEST_FILE_EDIT2}"
# test case for https://github.com/ansible/ansible/issues/35834
# (being prompted for new password on vault-edit with no configured passwords)
TEST_FILE_EDIT3="${MYTMPDIR}/test_file_edit3"
echo "This is a test file for edit3" > "${TEST_FILE_EDIT3}"
# ansible-config view
ansible-config view
# ansible-config
ansible-config dump --only-changed
ansible-vault encrypt "$@" --vault-id vault-password "${TEST_FILE_EDIT3}"
# EDITOR=./faux-editor.py ansible-vault edit "$@" "${TEST_FILE_EDIT3}"
EDITOR=./faux-editor.py ansible-vault edit --vault-id vault-password -vvvvv "${TEST_FILE_EDIT3}"
echo $?
# view the vault encrypted password file
ansible-vault view "$@" --vault-id vault-password encrypted-vault-password
# encrypt with a password from a vault encrypted password file and multiple vault-ids
# should fail because we don't know which vault id to use to encrypt with
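# ('&& :' keeps 'set -e' from aborting on this expected failure; the real
# exit code is still captured and asserted on the following lines)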
ansible-vault encrypt "$@" --vault-id vault-password --vault-id encrypted-vault-password "${TEST_FILE_ENC_PASSWORD}" && :
WRONG_RC=$?
echo "rc was $WRONG_RC (5 is expected)"
[ $WRONG_RC -eq 5 ]
# try to view the file encrypted with the vault-password we didn't specify
# to verify we didn't choose the wrong vault-id
ansible-vault view "$@" --vault-id vault-password encrypted-vault-password
FORMAT_1_1_HEADER="\$ANSIBLE_VAULT;1.1;AES256"
FORMAT_1_2_HEADER="\$ANSIBLE_VAULT;1.2;AES256"
VAULT_PASSWORD_FILE=vault-password
# new format, view, using password client script
ansible-vault view "$@" --vault-id [email protected] format_1_1_AES256.yml
# view, using password client script, unknown vault/keyname
ansible-vault view "$@" --vault-id [email protected] format_1_1_AES256.yml && :
# Use Linux setsid to test without a tty. No setsid on macOS/BSD though...
if [ -x "$(command -v setsid)" ]; then
# tests related to https://github.com/ansible/ansible/issues/30993
CMD='ansible-playbook -i ../../inventory -vvvvv --ask-vault-pass test_vault.yml'
setsid sh -c "echo test-vault-password|${CMD}" < /dev/null > log 2>&1 && :
WRONG_RC=$?
cat log
echo "rc was $WRONG_RC (0 is expected)"
[ $WRONG_RC -eq 0 ]
setsid sh -c 'tty; ansible-vault view --ask-vault-pass -vvvvv test_vault.yml' < /dev/null > log 2>&1 && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
cat log
setsid sh -c 'tty; echo passbhkjhword|ansible-playbook -i ../../inventory -vvvvv --ask-vault-pass test_vault.yml' < /dev/null > log 2>&1 && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
cat log
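# the next runs repeat the piped password with and without a space before the
# pipe (a trailing space in the echoed password); presumably this exercises
# whitespace stripping when the prompt reads the password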
setsid sh -c 'tty; echo test-vault-password |ansible-playbook -i ../../inventory -vvvvv --ask-vault-pass test_vault.yml' < /dev/null > log 2>&1
echo $?
cat log
setsid sh -c 'tty; echo test-vault-password|ansible-playbook -i ../../inventory -vvvvv --ask-vault-pass test_vault.yml' < /dev/null > log 2>&1
echo $?
cat log
setsid sh -c 'tty; echo test-vault-password |ansible-playbook -i ../../inventory -vvvvv --ask-vault-pass test_vault.yml' < /dev/null > log 2>&1
echo $?
cat log
setsid sh -c 'tty; echo test-vault-password|ansible-vault view --ask-vault-pass -vvvvv vaulted.inventory' < /dev/null > log 2>&1
echo $?
cat log
# test using --ask-vault-password option
CMD='ansible-playbook -i ../../inventory -vvvvv --ask-vault-password test_vault.yml'
setsid sh -c "echo test-vault-password|${CMD}" < /dev/null > log 2>&1 && :
WRONG_RC=$?
cat log
echo "rc was $WRONG_RC (0 is expected)"
[ $WRONG_RC -eq 0 ]
fi
ansible-vault view "$@" --vault-password-file vault-password-wrong format_1_1_AES256.yml && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
set -eux
# new format, view
ansible-vault view "$@" --vault-password-file vault-password format_1_1_AES256.yml
# new format, view with vault-id
ansible-vault view "$@" --vault-id=vault-password format_1_1_AES256.yml
# new format, view, using password script
ansible-vault view "$@" --vault-password-file password-script.py format_1_1_AES256.yml
# new format, view, using password script with vault-id
ansible-vault view "$@" --vault-id password-script.py format_1_1_AES256.yml
# new 1.2 format, view
ansible-vault view "$@" --vault-password-file vault-password format_1_2_AES256.yml
# new 1.2 format, view with vault-id
ansible-vault view "$@" --vault-id=test_vault_id@vault-password format_1_2_AES256.yml
# new 1.2 format, view, using password script
ansible-vault view "$@" --vault-password-file password-script.py format_1_2_AES256.yml
# new 1.2 format, view, using password script with vault-id
ansible-vault view "$@" --vault-id password-script.py format_1_2_AES256.yml
# newish 1.1 format, view, using a vault-id list from config env var
ANSIBLE_VAULT_IDENTITY_LIST='wrong-password@vault-password-wrong,default@vault-password' ansible-vault view "$@" --vault-id password-script.py format_1_1_AES256.yml
# new 1.2 format, view, ENFORCE_IDENTITY_MATCH=true, should fail, no 'test_vault_id' vault_id
ANSIBLE_VAULT_ID_MATCH=1 ansible-vault view "$@" --vault-password-file vault-password format_1_2_AES256.yml && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# new 1.2 format, view with vault-id, ENFORCE_IDENTITY_MATCH=true, should work, 'test_vault_id' is provided
ANSIBLE_VAULT_ID_MATCH=1 ansible-vault view "$@" --vault-id=test_vault_id@vault-password format_1_2_AES256.yml
# new 1.2 format, view, using password script, ENFORCE_IDENTITY_MATCH=true, should fail, no 'test_vault_id'
ANSIBLE_VAULT_ID_MATCH=1 ansible-vault view "$@" --vault-password-file password-script.py format_1_2_AES256.yml && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# new 1.2 format, view, using password script with vault-id, ENFORCE_IDENTITY_MATCH=true, should fail
ANSIBLE_VAULT_ID_MATCH=1 ansible-vault view "$@" --vault-id password-script.py format_1_2_AES256.yml && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# new 1.2 format, view, using password script with vault-id, ENFORCE_IDENTITY_MATCH=true, 'test_vault_id' provided should work
ANSIBLE_VAULT_ID_MATCH=1 ansible-vault view "$@" [email protected] format_1_2_AES256.yml
# test with a default vault password set via config/env, right password
ANSIBLE_VAULT_PASSWORD_FILE=vault-password ansible-vault view "$@" format_1_1_AES256.yml
# test with a default vault password set via config/env, wrong password
ANSIBLE_VAULT_PASSWORD_FILE=vault-password-wrong ansible-vault view "$@" format_1_1_AES.yml && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# test with a default vault-id list set via config/env, right password
ANSIBLE_VAULT_PASSWORD_FILE=wrong@vault-password-wrong,correct@vault-password ansible-vault view "$@" format_1_1_AES.yml && :
# test with a default vault-id list set via config/env,wrong passwords
ANSIBLE_VAULT_PASSWORD_FILE=wrong@vault-password-wrong,alsowrong@vault-password-wrong ansible-vault view "$@" format_1_1_AES.yml && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# try specifying a --encrypt-vault-id that doesn't exist; it should exit with an error
# indicating that the --encrypt-vault-id value was not found among the known vault-ids
ansible-vault encrypt "$@" --vault-password-file vault-password --encrypt-vault-id doesnt_exist "${TEST_FILE}" && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# encrypt it
ansible-vault encrypt "$@" --vault-password-file vault-password "${TEST_FILE}"
ansible-vault view "$@" --vault-password-file vault-password "${TEST_FILE}"
# view with multiple vault-password files, including a wrong one
ansible-vault view "$@" --vault-password-file vault-password --vault-password-file vault-password-wrong "${TEST_FILE}"
# view with multiple vault-password files, including a wrong one, using vault-id
ansible-vault view "$@" --vault-id vault-password --vault-id vault-password-wrong "${TEST_FILE}"
# And with the password files specified in a different order
ansible-vault view "$@" --vault-password-file vault-password-wrong --vault-password-file vault-password "${TEST_FILE}"
# And with the password files specified in a different order, using vault-id
ansible-vault view "$@" --vault-id vault-password-wrong --vault-id vault-password "${TEST_FILE}"
# And with the password files specified in a different order, using --vault-id and non default vault_ids
ansible-vault view "$@" --vault-id test_vault_id@vault-password-wrong --vault-id test_vault_id@vault-password "${TEST_FILE}"
ansible-vault decrypt "$@" --vault-password-file vault-password "${TEST_FILE}"
# encrypt it, using a vault_id so we write a 1.2 format file
ansible-vault encrypt "$@" --vault-id test_vault_1_2@vault-password "${TEST_FILE_1_2}"
ansible-vault view "$@" --vault-id vault-password "${TEST_FILE_1_2}"
ansible-vault view "$@" --vault-id test_vault_1_2@vault-password "${TEST_FILE_1_2}"
# view with multiple vault-password files, including a wrong one
ansible-vault view "$@" --vault-id vault-password --vault-id wrong_password@vault-password-wrong "${TEST_FILE_1_2}"
# And with the password files specified in a different order, using vault-id
ansible-vault view "$@" --vault-id vault-password-wrong --vault-id vault-password "${TEST_FILE_1_2}"
# And with the password files specified in a different order, using --vault-id and non default vault_ids
ansible-vault view "$@" --vault-id test_vault_id@vault-password-wrong --vault-id test_vault_id@vault-password "${TEST_FILE_1_2}"
ansible-vault decrypt "$@" --vault-id test_vault_1_2@vault-password "${TEST_FILE_1_2}"
# multiple vault passwords
ansible-vault view "$@" --vault-password-file vault-password --vault-password-file vault-password-wrong format_1_1_AES256.yml
# multiple vault passwords, --vault-id
ansible-vault view "$@" --vault-id test_vault_id@vault-password --vault-id test_vault_id@vault-password-wrong format_1_1_AES256.yml
# encrypt it, with password from password script
ansible-vault encrypt "$@" --vault-password-file password-script.py "${TEST_FILE}"
ansible-vault view "$@" --vault-password-file password-script.py "${TEST_FILE}"
ansible-vault decrypt "$@" --vault-password-file password-script.py "${TEST_FILE}"
# encrypt it, with password from password script
ansible-vault encrypt "$@" --vault-id [email protected] "${TEST_FILE}"
ansible-vault view "$@" --vault-id [email protected] "${TEST_FILE}"
ansible-vault decrypt "$@" --vault-id [email protected] "${TEST_FILE}"
# new password file for rekeyed file
NEW_VAULT_PASSWORD="${MYTMPDIR}/new-vault-password"
echo "newpassword" > "${NEW_VAULT_PASSWORD}"
ansible-vault encrypt "$@" --vault-password-file vault-password "${TEST_FILE}"
ansible-vault rekey "$@" --vault-password-file vault-password --new-vault-password-file "${NEW_VAULT_PASSWORD}" "${TEST_FILE}"
# --new-vault-password-file and --new-vault-id should cause options error
ansible-vault rekey "$@" --vault-password-file vault-password --new-vault-id=foobar --new-vault-password-file "${NEW_VAULT_PASSWORD}" "${TEST_FILE}" && :
WRONG_RC=$?
echo "rc was $WRONG_RC (2 is expected)"
[ $WRONG_RC -eq 2 ]
ansible-vault view "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" "${TEST_FILE}"
# view file with unicode in filename
ansible-vault view "$@" --vault-password-file vault-password vault-café.yml
# view with old password file and new password file
ansible-vault view "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --vault-password-file vault-password "${TEST_FILE}"
# view with old password file and new password file, different order
ansible-vault view "$@" --vault-password-file vault-password --vault-password-file "${NEW_VAULT_PASSWORD}" "${TEST_FILE}"
# view with old password file and new password file and another wrong
ansible-vault view "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --vault-password-file vault-password-wrong --vault-password-file vault-password "${TEST_FILE}"
# view with old password file and new password file and another wrong, using --vault-id
ansible-vault view "$@" --vault-id "tmp_new_password@${NEW_VAULT_PASSWORD}" --vault-id wrong_password@vault-password-wrong --vault-id myorg@vault-password "${TEST_FILE}"
ansible-vault decrypt "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" "${TEST_FILE}"
# reading/writing to/from stdin/stdout (See https://github.com/ansible/ansible/issues/23567)
ansible-vault encrypt "$@" --vault-password-file "${VAULT_PASSWORD_FILE}" --output="${TEST_FILE_OUTPUT}" < "${TEST_FILE}"
OUTPUT=$(ansible-vault decrypt "$@" --vault-password-file "${VAULT_PASSWORD_FILE}" --output=- < "${TEST_FILE_OUTPUT}")
echo "${OUTPUT}" | grep 'This is a test file'
OUTPUT_DASH=$(ansible-vault decrypt "$@" --vault-password-file "${VAULT_PASSWORD_FILE}" --output=- "${TEST_FILE_OUTPUT}")
echo "${OUTPUT_DASH}" | grep 'This is a test file'
OUTPUT_DASH_SPACE=$(ansible-vault decrypt "$@" --vault-password-file "${VAULT_PASSWORD_FILE}" --output - "${TEST_FILE_OUTPUT}")
echo "${OUTPUT_DASH_SPACE}" | grep 'This is a test file'
# test using an empty vault password file
ansible-vault view "$@" --vault-password-file empty-password format_1_1_AES256.yml && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
ansible-vault view "$@" --vault-id=empty@empty-password --vault-password-file empty-password format_1_1_AES256.yml && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
echo 'foo' > some_file.txt
ansible-vault encrypt "$@" --vault-password-file empty-password some_file.txt && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" "a test string"
# Test with multiple vault password files
# https://github.com/ansible/ansible/issues/57172
env ANSIBLE_VAULT_PASSWORD_FILE=vault-password ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --encrypt-vault-id default "a test string"
ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --name "blippy" "a test string names blippy"
ansible-vault encrypt_string "$@" --vault-id "${NEW_VAULT_PASSWORD}" "a test string"
ansible-vault encrypt_string "$@" --vault-id "${NEW_VAULT_PASSWORD}" --name "blippy" "a test string names blippy"
# from stdin
ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" < "${TEST_FILE}"
ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --stdin-name "the_var_from_stdin" < "${TEST_FILE}"
# write to file
ansible-vault encrypt_string "$@" --vault-password-file "${NEW_VAULT_PASSWORD}" --name "blippy" "a test string names blippy" --output "${MYTMPDIR}/enc_string_test_file"
# test ansible-vault edit with a faux editor
ansible-vault encrypt "$@" --vault-password-file vault-password "${TEST_FILE_EDIT}"
# edit a 1.1 format with no vault-id, should stay 1.1
EDITOR=./faux-editor.py ansible-vault edit "$@" --vault-password-file vault-password "${TEST_FILE_EDIT}"
head -1 "${TEST_FILE_EDIT}" | grep "${FORMAT_1_1_HEADER}"
# edit a 1.1 format with vault-id, should stay 1.1
cat "${TEST_FILE_EDIT}"
EDITOR=./faux-editor.py ansible-vault edit "$@" --vault-id vault_password@vault-password "${TEST_FILE_EDIT}"
cat "${TEST_FILE_EDIT}"
head -1 "${TEST_FILE_EDIT}" | grep "${FORMAT_1_1_HEADER}"
ansible-vault encrypt "$@" --vault-id vault_password@vault-password "${TEST_FILE_EDIT2}"
# verify that we aren't prompted for a new vault password on edit if we are running interactively (i.e., with prompts)
# have to use setsid and --ask-vault-pass to force a prompt to simulate it.
# See https://github.com/ansible/ansible/issues/35834
setsid sh -c 'tty; echo password |ansible-vault edit --ask-vault-pass vault_test.yml' < /dev/null > log 2>&1 && :
grep 'New Vault password' log && :
WRONG_RC=$?
echo "The stdout log had 'New Vault password' in it and it is not supposed to. rc of grep was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# edit a 1.2 format with vault id, should keep vault id and 1.2 format
EDITOR=./faux-editor.py ansible-vault edit "$@" --vault-id vault_password@vault-password "${TEST_FILE_EDIT2}"
head -1 "${TEST_FILE_EDIT2}" | grep "${FORMAT_1_2_HEADER};vault_password"
# edit a 1.2 file with no vault-id, should keep vault id and 1.2 format
EDITOR=./faux-editor.py ansible-vault edit "$@" --vault-password-file vault-password "${TEST_FILE_EDIT2}"
head -1 "${TEST_FILE_EDIT2}" | grep "${FORMAT_1_2_HEADER};vault_password"
# encrypt with a password from a vault encrypted password file and multiple vault-ids
# should fail because we don't know which vault id to use to encrypt with
ansible-vault encrypt "$@" --vault-id vault-password --vault-id encrypted-vault-password "${TEST_FILE_ENC_PASSWORD}" && :
WRONG_RC=$?
echo "rc was $WRONG_RC (5 is expected)"
[ $WRONG_RC -eq 5 ]
# encrypt with a password from a vault encrypted password file and multiple vault-ids
# but this time specify with --encrypt-vault-id, but specifying vault-id names (instead of default)
# ansible-vault encrypt "$@" --vault-id from_vault_password@vault-password --vault-id from_encrypted_vault_password@encrypted-vault-password --encrypt-vault-id from_encrypted_vault_password "${TEST_FILE_ENC_PASSWORD}"
# try to view the file encrypted with the vault-password we didn't specify
# to verify we didn't choose the wrong vault-id
# ansible-vault view "$@" --vault-id vault-password "${TEST_FILE_ENC_PASSWORD}" && :
# WRONG_RC=$?
# echo "rc was $WRONG_RC (1 is expected)"
# [ $WRONG_RC -eq 1 ]
ansible-vault encrypt "$@" --vault-id vault-password "${TEST_FILE_ENC_PASSWORD}"
# view the file encrypted with a password from a vault encrypted password file
ansible-vault view "$@" --vault-id vault-password --vault-id encrypted-vault-password "${TEST_FILE_ENC_PASSWORD}"
# try to view the file encrypted with a password from a vault encrypted password file but without the password to the password file.
# This should fail with an error.
ansible-vault view "$@" --vault-id encrypted-vault-password "${TEST_FILE_ENC_PASSWORD}" && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# test playbooks using vaulted files
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password --list-tasks
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password --list-hosts
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password --syntax-check
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password
ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password --syntax-check
ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password
ansible-playbook test_vaulted_inventory.yml -i vaulted.inventory -v "$@" --vault-password-file vault-password
ansible-playbook test_vaulted_template.yml -i ../../inventory -v "$@" --vault-password-file vault-password
# test using --vault-pass-file option
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-pass-file vault-password
# install TOML to parse the TOML inventory
# test playbooks using vaulted files (TOML)
pip install toml
ansible-vault encrypt ./inventory.toml -v "$@" --vault-password-file=./vault-password
ansible-playbook test_vaulted_inventory_toml.yml -i ./inventory.toml -v "$@" --vault-password-file vault-password
ansible-vault decrypt ./inventory.toml -v "$@" --vault-password-file=./vault-password
# test a playbook with a host_var whose value is non-ascii utf8 (see https://github.com/ansible/ansible/issues/37258)
ansible-playbook -i ../../inventory -v "$@" --vault-id vault-password test_vaulted_utf8_value.yml
# test with password from password script
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file password-script.py
ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file password-script.py
# with multiple password files
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password --vault-password-file vault-password-wrong
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-password-file vault-password
ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password --vault-password-file vault-password-wrong --syntax-check
ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-password-file vault-password
# test with a default vault password file set in config
ANSIBLE_VAULT_PASSWORD_FILE=vault-password ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong
# test using vault_identity_list config
ANSIBLE_VAULT_IDENTITY_LIST='wrong-password@vault-password-wrong,default@vault-password' ansible-playbook test_vault.yml -i ../../inventory -v "$@"
# test that we can have a vault encrypted yaml file that includes embedded vault vars
# that were encrypted with a different vault secret
ansible-playbook test_vault_file_encrypted_embedded.yml -i ../../inventory "$@" --vault-id encrypted_file_encrypted_var_password --vault-id vault-password
# with multiple password files, --vault-id, ordering
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id vault-password --vault-id vault-password-wrong
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id vault-password-wrong --vault-id vault-password
ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-id vault-password --vault-id vault-password-wrong --syntax-check
ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-id vault-password-wrong --vault-id vault-password
# test with multiple password files, including a script, and a wrong password
ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-password-file password-script.py --vault-password-file vault-password
# test with multiple password files, including a script, and a wrong password, and a mix of --vault-id and --vault-password-file
ansible-playbook test_vault_embedded.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-id password-script.py --vault-id vault-password
# test with multiple password files, including a script, and a wrong password, and a mix of --vault-id and --vault-password-file
ansible-playbook test_vault_embedded_ids.yml -i ../../inventory -v "$@" \
--vault-password-file vault-password-wrong \
--vault-id password-script.py --vault-id example1@example1_password \
--vault-id example2@example2_password --vault-password-file example3_password \
--vault-id vault-password
# with wrong password
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# with multiple wrong passwords
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-password-file vault-password-wrong --vault-password-file vault-password-wrong && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# with wrong password, --vault-id
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id vault-password-wrong && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# with multiple wrong passwords with --vault-id
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id vault-password-wrong --vault-id vault-password-wrong && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# with multiple wrong passwords with --vault-id
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id wrong1@vault-password-wrong --vault-id wrong2@vault-password-wrong && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# with empty password file
ansible-playbook test_vault.yml -i ../../inventory -v "$@" --vault-id empty@empty-password && :
WRONG_RC=$?
echo "rc was $WRONG_RC (1 is expected)"
[ $WRONG_RC -eq 1 ]
# test invalid format ala https://github.com/ansible/ansible/issues/28038
EXPECTED_ERROR='Vault format unhexlify error: Non-hexadecimal digit found'
ansible-playbook "$@" -i invalid_format/inventory --vault-id invalid_format/vault-secret invalid_format/broken-host-vars-tasks.yml 2>&1 | grep "${EXPECTED_ERROR}"
EXPECTED_ERROR='Vault format unhexlify error: Odd-length string'
ansible-playbook "$@" -i invalid_format/inventory --vault-id invalid_format/vault-secret invalid_format/broken-group-vars-tasks.yml 2>&1 | grep "${EXPECTED_ERROR}"
# Run playbook with vault file with unicode in filename (https://github.com/ansible/ansible/issues/50316)
ansible-playbook -i ../../inventory -v "$@" --vault-password-file vault-password test_utf8_value_in_filename.yml
# Ensure we don't leave unencrypted temp files dangling
ansible-playbook -v "$@" --vault-password-file vault-password test_dangling_temp.yml
ansible-playbook "$@" --vault-password-file vault-password single_vault_as_string.yml
# Test that only one accessible vault password is required
export ANSIBLE_VAULT_IDENTITY_LIST="id1@./nonexistent, id2@${MYTMPDIR}/unreadable, id3@./vault-password"
touch "${MYTMPDIR}/unreadable"
sudo chmod 000 "${MYTMPDIR}/unreadable"
ansible-vault encrypt_string content
ansible-vault encrypt_string content --encrypt-vault-id id3
set +e
# Try to use a missing vault password file
ansible-vault encrypt_string content --encrypt-vault-id id1 2>&1 | tee out.txt
test $? -ne 0
grep out.txt -e '[WARNING]: Error getting vault password file (id1)'
grep out.txt -e "ERROR! Did not find a match for --encrypt-vault-id=id2 in the known vault-ids ['id3']"
# Try to use an inaccessible vault password file
ansible-vault encrypt_string content --encrypt-vault-id id2 2>&1 | tee out.txt
test $? -ne 0
grep out.txt -e "[WARNING]: Error in vault password file loading (id2)"
grep out.txt -e "ERROR! Did not find a match for --encrypt-vault-id=id2 in the known vault-ids ['id3']"
set -e
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,286 |
`vars:` keyword on inner `block:` does not override `vars:` in outer `block:`
|
### Summary
If a var is defined in the `vars:` of a block, and then redefined again in the `vars:` of an inner block, the outer value is still used.
`vars:` on a _task_ in the inner block works as expected.
### Issue Type
Bug Report
### Component Name
unknown
### Ansible Version
```console
$ ansible --version
ansible [core 2.12.0.dev0] (devel 1fc1ab89ae) last updated 2021/07/17 12:29:56 (GMT -400)
config file = None
configured module search path = ['/home/briantist/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/briantist/code/ansible/ansible.core/lib/ansible
ansible collection location = /home/briantist/.ansible/collections:/usr/share/ansible/collections
executable location = /home/briantist/code/ansible/ansible.core/bin/ansible
python version = 3.6.9 (default, Jan 26 2021, 15:33:00) [GCC 8.4.0]
jinja version = 3.0.1
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
CALLBACKS_ENABLED(env: ANSIBLE_CALLBACK_WHITELIST) = ['profile_tasks']
DEFAULT_FORKS(env: ANSIBLE_FORKS) = 30
INVENTORY_ENABLED(env: ANSIBLE_INVENTORY_ENABLED) = ['host_list', 'auto', 'ini', 'yaml', 'toml']
```
### OS / Environment
Ubuntu 18.04 on WSL2
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml
- name: outer
vars:
myvar: abc
block:
- debug:
var: myvar
- name: inner block
vars:
myvar: 123
block:
- debug:
var: myvar
- name: task
vars:
myvar: xyz
debug:
var: myvar
```
### Expected Results
inner block first `debug` result is `123`
### Actual Results
inner block first `debug` result is `abc`
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
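
For illustration, here is a minimal sketch (hypothetical classes, not Ansible's actual code) of the merge ordering that produces this result, assuming a `get_vars`-style lookup where the parent's vars are applied last:

```python
# Sketch only: FakeBlock is a stand-in to show the dict-merge order;
# compare with Block.get_vars in lib/ansible/playbook/block.py below.
class FakeBlock:
    def __init__(self, own_vars, parent=None):
        self.vars = own_vars
        self._parent = parent

    def get_vars(self):
        all_vars = self.vars.copy()
        if self._parent:
            # the parent's values land last, so they override the inner block
            all_vars.update(self._parent.get_vars())
        return all_vars

outer = FakeBlock({"myvar": "abc"})
inner = FakeBlock({"myvar": 123}, parent=outer)
print(inner.get_vars())  # {'myvar': 'abc'}; the outer value wins
```

Swapping the order (merge the parent first, then overlay the block's own vars) yields the expected `123`.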
|
https://github.com/ansible/ansible/issues/75286
|
https://github.com/ansible/ansible/pull/75287
|
29b5eb6ba9fb652ddd8dd06cdd8f2e80e2098063
|
b1d6750e8bbdf507a4af24a4319b78b7287e233c
| 2021-07-20T14:27:01Z |
python
| 2022-01-31T17:21:50Z |
changelogs/fragments/fix_block_var_inh.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,286 |
`vars:` keyword on inner `block:` does not override `vars:` in outer `block:`
|
### Summary
If a var is defined in the `vars:` of a block, and then redefined again in the `vars:` of an inner block, the outer value is still used.
`vars:` on a _task_ in the inner block works as expected.
### Issue Type
Bug Report
### Component Name
unknown
### Ansible Version
```console
$ ansible --version
ansible [core 2.12.0.dev0] (devel 1fc1ab89ae) last updated 2021/07/17 12:29:56 (GMT -400)
config file = None
configured module search path = ['/home/briantist/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/briantist/code/ansible/ansible.core/lib/ansible
ansible collection location = /home/briantist/.ansible/collections:/usr/share/ansible/collections
executable location = /home/briantist/code/ansible/ansible.core/bin/ansible
python version = 3.6.9 (default, Jan 26 2021, 15:33:00) [GCC 8.4.0]
jinja version = 3.0.1
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
CALLBACKS_ENABLED(env: ANSIBLE_CALLBACK_WHITELIST) = ['profile_tasks']
DEFAULT_FORKS(env: ANSIBLE_FORKS) = 30
INVENTORY_ENABLED(env: ANSIBLE_INVENTORY_ENABLED) = ['host_list', 'auto', 'ini', 'yaml', 'toml']
```
### OS / Environment
Ubuntu 18.04 on WSL2
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml
- name: outer
vars:
myvar: abc
block:
- debug:
var: myvar
- name: inner block
vars:
myvar: 123
block:
- debug:
var: myvar
- name: task
vars:
myvar: xyz
debug:
var: myvar
```
### Expected Results
inner block first `debug` result is `123`
### Actual Results
inner block first `debug` result is `abc`
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75286
|
https://github.com/ansible/ansible/pull/75287
|
29b5eb6ba9fb652ddd8dd06cdd8f2e80e2098063
|
b1d6750e8bbdf507a4af24a4319b78b7287e233c
| 2021-07-20T14:27:01Z |
python
| 2022-01-31T17:21:50Z |
lib/ansible/playbook/block.py
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ansible.constants as C
from ansible.errors import AnsibleParserError
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.conditional import Conditional
from ansible.playbook.collectionsearch import CollectionSearch
from ansible.playbook.helpers import load_list_of_tasks
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
from ansible.utils.sentinel import Sentinel
class Block(Base, Conditional, CollectionSearch, Taggable):
# main block fields containing the task lists
_block = FieldAttribute(isa='list', default=list, inherit=False)
_rescue = FieldAttribute(isa='list', default=list, inherit=False)
_always = FieldAttribute(isa='list', default=list, inherit=False)
# other fields for task compat
_notify = FieldAttribute(isa='list')
_delegate_to = FieldAttribute(isa='string')
_delegate_facts = FieldAttribute(isa='bool')
# for future consideration? this would be functionally
# similar to the 'else' clause for exceptions
# _otherwise = FieldAttribute(isa='list')
def __init__(self, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, implicit=False):
self._play = play
self._role = role
self._parent = None
self._dep_chain = None
self._use_handlers = use_handlers
self._implicit = implicit
if task_include:
self._parent = task_include
elif parent_block:
self._parent = parent_block
super(Block, self).__init__()
def __repr__(self):
return "BLOCK(uuid=%s)(id=%s)(parent=%s)" % (self._uuid, id(self), self._parent)
def __eq__(self, other):
'''object comparison based on _uuid'''
return self._uuid == other._uuid
def __ne__(self, other):
'''object comparison based on _uuid'''
return self._uuid != other._uuid
def get_vars(self):
'''
Blocks do not store variables directly; however, they may be a member
of a role or task include which does, so return those if present.
'''
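# NOTE: the parent's vars are merged last here, so a parent's value for a
# variable overrides this block's own value (the behavior reported in #75286).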
all_vars = self.vars.copy()
if self._parent:
all_vars.update(self._parent.get_vars())
return all_vars
@staticmethod
def load(data, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
implicit = not Block.is_block(data)
b = Block(play=play, parent_block=parent_block, role=role, task_include=task_include, use_handlers=use_handlers, implicit=implicit)
return b.load_data(data, variable_manager=variable_manager, loader=loader)
@staticmethod
def is_block(ds):
is_block = False
if isinstance(ds, dict):
for attr in ('block', 'rescue', 'always'):
if attr in ds:
is_block = True
break
return is_block
def preprocess_data(self, ds):
'''
If a simple task is given, an implicit block for that single task
is created, which goes in the main portion of the block
'''
if not Block.is_block(ds):
if isinstance(ds, list):
return super(Block, self).preprocess_data(dict(block=ds))
else:
return super(Block, self).preprocess_data(dict(block=[ds]))
return super(Block, self).preprocess_data(ds)
def _load_block(self, attr, ds):
try:
return load_list_of_tasks(
ds,
play=self._play,
block=self,
role=self._role,
task_include=None,
variable_manager=self._variable_manager,
loader=self._loader,
use_handlers=self._use_handlers,
)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading a block", obj=self._ds, orig_exc=e)
def _load_rescue(self, attr, ds):
try:
return load_list_of_tasks(
ds,
play=self._play,
block=self,
role=self._role,
task_include=None,
variable_manager=self._variable_manager,
loader=self._loader,
use_handlers=self._use_handlers,
)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading rescue.", obj=self._ds, orig_exc=e)
def _load_always(self, attr, ds):
try:
return load_list_of_tasks(
ds,
play=self._play,
block=self,
role=self._role,
task_include=None,
variable_manager=self._variable_manager,
loader=self._loader,
use_handlers=self._use_handlers,
)
except AssertionError as e:
raise AnsibleParserError("A malformed block was encountered while loading always", obj=self._ds, orig_exc=e)
def _validate_always(self, attr, name, value):
if value and not self.block:
raise AnsibleParserError("'%s' keyword cannot be used without 'block'" % name, obj=self._ds)
_validate_rescue = _validate_always
def get_dep_chain(self):
if self._dep_chain is None:
if self._parent:
return self._parent.get_dep_chain()
else:
return None
else:
return self._dep_chain[:]
def copy(self, exclude_parent=False, exclude_tasks=False):
def _dupe_task_list(task_list, new_block):
new_task_list = []
for task in task_list:
new_task = task.copy(exclude_parent=True)
if task._parent:
new_task._parent = task._parent.copy(exclude_tasks=True)
if task._parent == new_block:
# If task._parent is the same as new_block, just replace it
new_task._parent = new_block
else:
# task may not be a direct child of new_block, search for the correct place to insert new_block
cur_obj = new_task._parent
while cur_obj._parent and cur_obj._parent != new_block:
cur_obj = cur_obj._parent
cur_obj._parent = new_block
else:
new_task._parent = new_block
new_task_list.append(new_task)
return new_task_list
new_me = super(Block, self).copy()
new_me._play = self._play
new_me._use_handlers = self._use_handlers
if self._dep_chain is not None:
new_me._dep_chain = self._dep_chain[:]
new_me._parent = None
if self._parent and not exclude_parent:
new_me._parent = self._parent.copy(exclude_tasks=True)
if not exclude_tasks:
new_me.block = _dupe_task_list(self.block or [], new_me)
new_me.rescue = _dupe_task_list(self.rescue or [], new_me)
new_me.always = _dupe_task_list(self.always or [], new_me)
new_me._role = None
if self._role:
new_me._role = self._role
new_me.validate()
return new_me
def serialize(self):
'''
Override of the default serialize method, since when we're serializing
a task we don't want to include the attribute list of tasks.
'''
data = dict()
for attr in self._valid_attrs:
if attr not in ('block', 'rescue', 'always'):
data[attr] = getattr(self, attr)
data['dep_chain'] = self.get_dep_chain()
if self._role is not None:
data['role'] = self._role.serialize()
if self._parent is not None:
data['parent'] = self._parent.copy(exclude_tasks=True).serialize()
data['parent_type'] = self._parent.__class__.__name__
return data
def deserialize(self, data):
'''
Override of the default deserialize method, to match the above overridden
serialize method
'''
# import is here to avoid import loops
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.handler_task_include import HandlerTaskInclude
# we don't want the full set of attributes (the task lists), as that
# would lead to a serialize/deserialize loop
for attr in self._valid_attrs:
if attr in data and attr not in ('block', 'rescue', 'always'):
setattr(self, attr, data.get(attr))
self._dep_chain = data.get('dep_chain', None)
# if there was a serialized role, unpack it too
role_data = data.get('role')
if role_data:
r = Role()
r.deserialize(role_data)
self._role = r
parent_data = data.get('parent')
if parent_data:
parent_type = data.get('parent_type')
if parent_type == 'Block':
p = Block()
elif parent_type == 'TaskInclude':
p = TaskInclude()
elif parent_type == 'HandlerTaskInclude':
p = HandlerTaskInclude()
p.deserialize(parent_data)
self._parent = p
self._dep_chain = self._parent.get_dep_chain()
def set_loader(self, loader):
self._loader = loader
if self._parent:
self._parent.set_loader(loader)
elif self._role:
self._role.set_loader(loader)
dep_chain = self.get_dep_chain()
if dep_chain:
for dep in dep_chain:
dep.set_loader(loader)
def _get_parent_attribute(self, attr, extend=False, prepend=False):
'''
Generic logic to get the attribute or parent attribute for a block value.
'''
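# Resolution order: this block's own attribute first, then the parent chain,
# then the role (including its reversed dependency chain), then the play.
# Attributes marked 'extend' accumulate values along the way instead of
# stopping at the first one found.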
extend = self._valid_attrs[attr].extend
prepend = self._valid_attrs[attr].prepend
try:
value = self._attributes[attr]
# If parent is static, we can grab attrs from the parent
# otherwise, defer to the grandparent
if getattr(self._parent, 'statically_loaded', True):
_parent = self._parent
else:
_parent = self._parent._parent
if _parent and (value is Sentinel or extend):
try:
if getattr(_parent, 'statically_loaded', True):
if hasattr(_parent, '_get_parent_attribute'):
parent_value = _parent._get_parent_attribute(attr)
else:
parent_value = _parent._attributes.get(attr, Sentinel)
if extend:
value = self._extend_value(value, parent_value, prepend)
else:
value = parent_value
except AttributeError:
pass
if self._role and (value is Sentinel or extend):
try:
parent_value = self._role._attributes.get(attr, Sentinel)
if extend:
value = self._extend_value(value, parent_value, prepend)
else:
value = parent_value
dep_chain = self.get_dep_chain()
if dep_chain and (value is Sentinel or extend):
dep_chain.reverse()
for dep in dep_chain:
dep_value = dep._attributes.get(attr, Sentinel)
if extend:
value = self._extend_value(value, dep_value, prepend)
else:
value = dep_value
if value is not Sentinel and not extend:
break
except AttributeError:
pass
if self._play and (value is Sentinel or extend):
try:
play_value = self._play._attributes.get(attr, Sentinel)
if play_value is not Sentinel:
if extend:
value = self._extend_value(value, play_value, prepend)
else:
value = play_value
except AttributeError:
pass
except KeyError:
pass
return value
def filter_tagged_tasks(self, all_vars):
'''
Creates a new block, with task lists filtered based on the tags.
'''
def evaluate_and_append_task(target):
tmp_list = []
for task in target:
if isinstance(task, Block):
filtered_block = evaluate_block(task)
if filtered_block.has_tasks():
tmp_list.append(filtered_block)
elif ((task.action in C._ACTION_META and task.implicit) or
(task.action in C._ACTION_INCLUDE and task.evaluate_tags([], self._play.skip_tags, all_vars=all_vars)) or
task.evaluate_tags(self._play.only_tags, self._play.skip_tags, all_vars=all_vars)):
tmp_list.append(task)
return tmp_list
def evaluate_block(block):
new_block = block.copy(exclude_parent=True, exclude_tasks=True)
new_block._parent = block._parent
new_block.block = evaluate_and_append_task(block.block)
new_block.rescue = evaluate_and_append_task(block.rescue)
new_block.always = evaluate_and_append_task(block.always)
return new_block
return evaluate_block(self)
def has_tasks(self):
return len(self.block) > 0 or len(self.rescue) > 0 or len(self.always) > 0
def get_include_params(self):
if self._parent:
return self._parent.get_include_params()
else:
return dict()
def all_parents_static(self):
'''
Determine if all of the parents of this block were statically loaded
or not. Since Task/TaskInclude objects may be in the chain, they simply
call their parent's all_parents_static() method. Only Block objects in
the chain check the statically_loaded value of the parent.
'''
from ansible.playbook.task_include import TaskInclude
if self._parent:
if isinstance(self._parent, TaskInclude) and not self._parent.statically_loaded:
return False
return self._parent.all_parents_static()
return True
def get_first_parent_include(self):
from ansible.playbook.task_include import TaskInclude
if self._parent:
if isinstance(self._parent, TaskInclude):
return self._parent
return self._parent.get_first_parent_include()
return None
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,286 |
`vars:` keyword on inner `block:` does not override `vars:` in outer `block:`
|
### Summary
If a var is defined in the `vars:` of a block, and then redefined again in the `vars:` of an inner block, the outer value is still used.
`vars:` on a _task_ in the inner block works as expected.
### Issue Type
Bug Report
### Component Name
unknown
### Ansible Version
```console
$ ansible --version
ansible [core 2.12.0.dev0] (devel 1fc1ab89ae) last updated 2021/07/17 12:29:56 (GMT -400)
config file = None
configured module search path = ['/home/briantist/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/briantist/code/ansible/ansible.core/lib/ansible
ansible collection location = /home/briantist/.ansible/collections:/usr/share/ansible/collections
executable location = /home/briantist/code/ansible/ansible.core/bin/ansible
python version = 3.6.9 (default, Jan 26 2021, 15:33:00) [GCC 8.4.0]
jinja version = 3.0.1
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
CALLBACKS_ENABLED(env: ANSIBLE_CALLBACK_WHITELIST) = ['profile_tasks']
DEFAULT_FORKS(env: ANSIBLE_FORKS) = 30
INVENTORY_ENABLED(env: ANSIBLE_INVENTORY_ENABLED) = ['host_list', 'auto', 'ini', 'yaml', 'toml']
```
### OS / Environment
Ubuntu 18.04 on WSL2
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml
- name: outer
vars:
myvar: abc
block:
- debug:
var: myvar
- name: inner block
vars:
myvar: 123
block:
- debug:
var: myvar
- name: task
vars:
myvar: xyz
debug:
var: myvar
```
### Expected Results
inner block first `debug` result is `123`
### Actual Results
inner block first `debug` result is `abc`
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75286
|
https://github.com/ansible/ansible/pull/75287
|
29b5eb6ba9fb652ddd8dd06cdd8f2e80e2098063
|
b1d6750e8bbdf507a4af24a4319b78b7287e233c
| 2021-07-20T14:27:01Z |
python
| 2022-01-31T17:21:50Z |
test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role/tasks/main.yml
|
- testmodule:
register: testmodule_out_from_non_coll_role
- embedded_module:
register: embedded_module_out_from_non_coll_role
- name: check collections list from role meta
plugin_lookup:
register: pluginlookup_out
- debug: var=pluginlookup_out
- debug:
msg: '{{ test_role_input | default("(undefined)") }}'
register: test_role_output
- assert:
that:
- test_role_input is not defined or test_role_input == test_role_output.msg
- vars:
test_role_input: include another non-coll role
block:
- include_role:
name: non_coll_role_to_call
- assert:
that:
- test_role_output.msg == test_role_input
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,286 |
`vars:` keyword on inner `block:` does not override `vars:` in outer `block:`
|
### Summary
If a var is defined in the `vars:` of a block and then redefined in the `vars:` of an inner block, the outer value is still used.
`vars:` on a _task_ in the inner block works as expected.
### Issue Type
Bug Report
### Component Name
unknown
### Ansible Version
```console
$ ansible --version
ansible [core 2.12.0.dev0] (devel 1fc1ab89ae) last updated 2021/07/17 12:29:56 (GMT -400)
config file = None
configured module search path = ['/home/briantist/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/briantist/code/ansible/ansible.core/lib/ansible
ansible collection location = /home/briantist/.ansible/collections:/usr/share/ansible/collections
executable location = /home/briantist/code/ansible/ansible.core/bin/ansible
python version = 3.6.9 (default, Jan 26 2021, 15:33:00) [GCC 8.4.0]
jinja version = 3.0.1
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
CALLBACKS_ENABLED(env: ANSIBLE_CALLBACK_WHITELIST) = ['profile_tasks']
DEFAULT_FORKS(env: ANSIBLE_FORKS) = 30
INVENTORY_ENABLED(env: ANSIBLE_INVENTORY_ENABLED) = ['host_list', 'auto', 'ini', 'yaml', 'toml']
```
### OS / Environment
Ubuntu 18.04 on WSL2
### Steps to Reproduce
```yaml
- name: outer
vars:
myvar: abc
block:
- debug:
var: myvar
- name: inner block
vars:
myvar: 123
block:
- debug:
var: myvar
- name: task
vars:
myvar: xyz
debug:
var: myvar
```
### Expected Results
inner block first `debug` result is `123`
### Actual Results
inner block first `debug` result is `abc`
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75286
|
https://github.com/ansible/ansible/pull/75287
|
29b5eb6ba9fb652ddd8dd06cdd8f2e80e2098063
|
b1d6750e8bbdf507a4af24a4319b78b7287e233c
| 2021-07-20T14:27:01Z |
python
| 2022-01-31T17:21:50Z |
test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/playbooks/roles/non_coll_role_to_call/tasks/main.yml
|
- debug:
msg: '{{ test_role_input | default("(undefined)") }}'
register: test_role_output
- assert:
that:
- test_role_input is not defined or test_role_input == test_role_output.msg
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,286 |
`vars:` keyword on inner `block:` does not override `vars:` in outer `block:`
|
### Summary
If a var is defined in the `vars:` of a block and then redefined in the `vars:` of an inner block, the outer value is still used.
`vars:` on a _task_ in the inner block works as expected.
### Issue Type
Bug Report
### Component Name
unknown
### Ansible Version
```console
$ ansible --version
ansible [core 2.12.0.dev0] (devel 1fc1ab89ae) last updated 2021/07/17 12:29:56 (GMT -400)
config file = None
configured module search path = ['/home/briantist/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/briantist/code/ansible/ansible.core/lib/ansible
ansible collection location = /home/briantist/.ansible/collections:/usr/share/ansible/collections
executable location = /home/briantist/code/ansible/ansible.core/bin/ansible
python version = 3.6.9 (default, Jan 26 2021, 15:33:00) [GCC 8.4.0]
jinja version = 3.0.1
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
CALLBACKS_ENABLED(env: ANSIBLE_CALLBACK_WHITELIST) = ['profile_tasks']
DEFAULT_FORKS(env: ANSIBLE_FORKS) = 30
INVENTORY_ENABLED(env: ANSIBLE_INVENTORY_ENABLED) = ['host_list', 'auto', 'ini', 'yaml', 'toml']
```
### OS / Environment
Ubuntu 18.04 on WSL2
### Steps to Reproduce
```yaml
- name: outer
vars:
myvar: abc
block:
- debug:
var: myvar
- name: inner block
vars:
myvar: 123
block:
- debug:
var: myvar
- name: task
vars:
myvar: xyz
debug:
var: myvar
```
### Expected Results
inner block first `debug` result is `123`
### Actual Results
inner block first `debug` result is `abc`
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75286
|
https://github.com/ansible/ansible/pull/75287
|
29b5eb6ba9fb652ddd8dd06cdd8f2e80e2098063
|
b1d6750e8bbdf507a4af24a4319b78b7287e233c
| 2021-07-20T14:27:01Z |
python
| 2022-01-31T17:21:50Z |
test/integration/targets/var_inheritance/aliases
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,286 |
`vars:` keyword on inner `block:` does not override `vars:` in outer `block:`
|
### Summary
If a var is defined in the `vars:` of a block and then redefined in the `vars:` of an inner block, the outer value is still used.
`vars:` on a _task_ in the inner block works as expected.
### Issue Type
Bug Report
### Component Name
unknown
### Ansible Version
```console
$ ansible --version
ansible [core 2.12.0.dev0] (devel 1fc1ab89ae) last updated 2021/07/17 12:29:56 (GMT -400)
config file = None
configured module search path = ['/home/briantist/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/briantist/code/ansible/ansible.core/lib/ansible
ansible collection location = /home/briantist/.ansible/collections:/usr/share/ansible/collections
executable location = /home/briantist/code/ansible/ansible.core/bin/ansible
python version = 3.6.9 (default, Jan 26 2021, 15:33:00) [GCC 8.4.0]
jinja version = 3.0.1
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
CALLBACKS_ENABLED(env: ANSIBLE_CALLBACK_WHITELIST) = ['profile_tasks']
DEFAULT_FORKS(env: ANSIBLE_FORKS) = 30
INVENTORY_ENABLED(env: ANSIBLE_INVENTORY_ENABLED) = ['host_list', 'auto', 'ini', 'yaml', 'toml']
```
### OS / Environment
Ubuntu 18.04 on WSL2
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml
- name: outer
vars:
myvar: abc
block:
- debug:
var: myvar
- name: inner block
vars:
myvar: 123
block:
- debug:
var: myvar
- name: task
vars:
myvar: xyz
debug:
var: myvar
```
### Expected Results
inner block first `debug` result is `123`
### Actual Results
inner block first `debug` result is `abc`
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75286
|
https://github.com/ansible/ansible/pull/75287
|
29b5eb6ba9fb652ddd8dd06cdd8f2e80e2098063
|
b1d6750e8bbdf507a4af24a4319b78b7287e233c
| 2021-07-20T14:27:01Z |
python
| 2022-01-31T17:21:50Z |
test/integration/targets/var_inheritance/tasks/main.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 76,382 |
Add undef() to the 2.12 porting guide
|
### Summary
https://github.com/ansible/ansible/pull/75435 added a new global Jinja function, which people may need to account for in their templating if they were using `undef` for other purposes.
For example, this template (simplified from an example that came up on IRC):
```
{% macro funroll_loops(parent=undef) %}
{% if parent is defined %}
{{ parent }}
{% endif %}
{% endmacro %}
{{ funroll_loops() }}
```
In old versions of Ansible the output file would be empty. In 2.12, it is:
```
<bound method Templar._make_undefined of <ansible.template.Templar object at 0x7f668e0ead00>>
```
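For illustration, here is a minimal standalone sketch of the shadowing (assumption: a plain lambda stands in for the bound method that ansible-core registers as the `undef` global):
```
from jinja2 import Environment

env = Environment()
# Simulate ansible-core 2.12 registering a callable global named `undef`
# (hypothetical stand-in for Templar._make_undefined).
env.globals['undef'] = lambda hint=None: None

template = env.from_string(
    "{% macro funroll_loops(parent=undef) %}"
    "{% if parent is defined %}{{ parent }}{% endif %}"
    "{% endmacro %}"
    "{{ funroll_loops() }}"
)
# Before 2.12, `undef` was an undefined name, so `parent` stayed undefined
# and the output was empty; with the global present, `parent` is bound to
# the callable and its repr leaks into the output.
print(template.render())
```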
### Issue Type
Documentation Report
### Component Name
porting_guide_core_2.12.rst
### Ansible Version
```console
$ ansible --version
ansible [core 2.12.0]
config file = /home/ec2-user/ansible-aws/ansible/ansible.cfg
configured module search path = ['/home/ec2-user/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/ec2-user/ansible-aws/.venv/lib/python3.8/site-packages/ansible
ansible collection location = /home/ec2-user/ansible-aws/ansible/collections
executable location = /home/ec2-user/ansible-aws/.venv/bin/ansible
python version = 3.8.5 (default, Feb 18 2021, 01:24:20) [GCC 7.3.1 20180712 (Red Hat 7.3.1-12)]
jinja version = 3.0.3
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
```
### OS / Environment
n/a
### Additional Information
2.12 also hangs indefinitely with the following task, which I don't have time to dig into at the moment but is very odd behaviour:
```
- debug:
msg: "{{ undef | default('') }}"
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/76382
|
https://github.com/ansible/ansible/pull/76880
|
10fd7bf12c75d470c2fa6173c9ca055c322cbfc3
|
e1cf6af06d2aa36bc131505cd42270f01c94f057
| 2021-11-28T21:23:18Z |
python
| 2022-02-03T19:45:11Z |
docs/docsite/rst/porting_guides/porting_guide_core_2.12.rst
|
.. _porting_2.12_guide_core:
*******************************
Ansible-core 2.12 Porting Guide
*******************************
This section discusses the behavioral changes between ``ansible-core`` 2.11 and ``ansible-core`` 2.12.
It is intended to assist in updating your playbooks, plugins and other parts of your Ansible infrastructure so they will work with this version of Ansible.
We suggest you read this page along with `ansible-core Changelog for 2.12 <https://github.com/ansible/ansible/blob/stable-2.12/changelogs/CHANGELOG-v2.12.rst>`_ to understand what updates you may need to make.
This document is part of a collection on porting. The complete list of porting guides can be found at :ref:`porting guides <porting_guides>`.
.. contents:: Topics
Playbook
========
* When calling tasks and setting ``async``, setting ``ANSIBLE_ASYNC_DIR`` under ``environment:`` is no longer valid. Instead, use the shell configuration variable ``async_dir``, for example by setting ``ansible_async_dir``:
.. code-block:: yaml
tasks:
- dnf:
name: '*'
state: latest
async: 300
poll: 5
vars:
ansible_async_dir: /path/to/my/custom/dir
Python Interpreter Discovery
============================
The default value of ``INTERPRETER_PYTHON_FALLBACK`` changed to ``auto``. The list of Python interpreters in ``INTERPRETER_PYTHON_FALLBACK`` changed to prefer Python 3 over Python 2. The combination of these two changes means the new default behavior is to quietly prefer Python 3 over Python 2 on remote hosts. Previously a deprecation warning was issued in situations where interpreter discovery would have used Python 3 but the interpreter was set to ``/usr/bin/python``.
``INTERPRETER_PYTHON_FALLBACK`` can be changed from the default list of interpreters by setting the ``ansible_interpreter_python_fallback`` variable.
See :ref:`interpreter discovery documentation <interpreter_discovery>` for more details.
Command Line
============
* Python 3.8 on the controller node is a hard requirement for this release. The command line scripts will not function with a lower Python version.
* ``ansible-vault`` no longer supports ``PyCrypto`` and requires ``cryptography``.
Deprecated
==========
* Python 2.6 on the target node is deprecated in this release. ``ansible-core`` 2.13 will remove support for Python 2.6.
* Bare variables in conditionals: ``when`` conditionals no longer automatically parse string booleans such as ``"true"`` and ``"false"`` into actual booleans. Any variable containing a non-empty string is considered true. This was previously configurable with the ``CONDITIONAL_BARE_VARS`` configuration option (and the ``ANSIBLE_CONDITIONAL_BARE_VARS`` environment variable). This setting no longer has any effect. Users can work around the issue by using the ``|bool`` filter:
.. code-block:: yaml
vars:
teardown: 'false'
tasks:
- include_tasks: teardown.yml
when: teardown | bool
- include_tasks: provision.yml
when: not teardown | bool
* The ``_remote_checksum()`` method in ``ActionBase`` is deprecated. Any action plugin using this method should use ``_execute_remote_stat()`` instead.
Modules
=======
* ``cron`` now requires ``name`` to be specified in all cases.
* ``cron`` no longer allows a ``reboot`` parameter. Use ``special_time: reboot`` instead.
* ``hostname`` - On FreeBSD, the ``before`` result will no longer be ``"temporarystub"`` if permanent hostname file does not exist. It will instead be ``""`` (empty string) for consistency with other systems.
* ``hostname`` - On OpenRC and Solaris based systems, the ``before`` result will no longer be ``"UNKNOWN"`` if the permanent hostname file does not exist. It will instead be ``""`` (empty string) for consistency with other systems.
* ``pip`` now uses the ``pip`` Python module installed for the Ansible module's Python interpreter, if available, unless ``executable`` or ``virtualenv`` were specified.
Modules removed
---------------
The following modules no longer exist:
* No notable changes
Deprecation notices
-------------------
No notable changes
Noteworthy module changes
-------------------------
No notable changes
Plugins
=======
* The ``unique`` filter with Jinja2 < 2.10 is case-sensitive and now coherently raises an error if ``case_sensitive=False`` is requested, instead of when ``case_sensitive=True``.
* Set theory filters (``intersect``, ``difference``, ``symmetric_difference`` and ``union``) are now case-sensitive. Explicitly use ``case_sensitive=False`` to keep previous behavior. Note: with Jinja2 < 2.10, the filters were already case-sensitive by default.
* ``password_hash`` now uses ``passlib`` defaults when an option is unspecified; for example, ``bcrypt_sha256`` now defaults to the "2b" format, and if the "2a" format is required it must be specified explicitly.
Porting custom scripts
======================
No notable changes
Networking
==========
No notable changes
|
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,971 |
add_host module cannot handle a variable in a condition
|
### Summary
When I define a `failed_when:` condition on a task using `add_host`, it causes an error saying the variable is not found.
### Issue Type
Bug Report
### Component Name
task
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.2]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/var/lib/awx/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
ansible collection location = /var/lib/awx/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.8.6 (default, Jan 22 2021, 11:41:28) [GCC 8.4.1 20200928 (Red Hat 8.4.1-1)]
jinja version = 2.10.3
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
$
(nothing)
```
### OS / Environment
$ cat /etc/redhat-release
Red Hat Enterprise Linux release 8.4 (Ootpa)
### Steps to Reproduce
### pattern 1: using `add_host` module
```
---
- hosts: localhost
gather_facts: false
vars:
test: 1
tasks:
- add_host:
name: "myhost"
groups: "mygroup"
ansible_host: "host46"
ansible_ssh_host: "192.168.100.100"
failed_when: test == 2
```
When I ran the above playbook, it failed with the error below, even though the variable 'test' was defined in the play.
```
$ ansible-playbook -i localhost, conditional.yml
PLAY [localhost] *********************************************************************************************************************************************************************************
TASK [add_host] **********************************************************************************************************************************************************************************
ERROR! The conditional check 'test == 2' failed. The error was: error while evaluating conditional (test == 2): 'test' is undefined
```
If I passed the variable via extra_vars and set it so the condition was true, it worked as expected.
```
$ ansible-playbook -i localhost, conditional.yml -e "{test: 2}"
PLAY [localhost] *********************************************************************************************************************************************************************************
TASK [add_host] **********************************************************************************************************************************************************************************
fatal: [localhost]: FAILED! => {"add_host": {"groups": ["mygroup"], "host_name": "myhost", "host_vars": {"ansible_host": "host46", "ansible_ssh_host": "192.168.100.100"}}, "changed": false, "failed_when_result": true}
NO MORE HOSTS LEFT *******************************************************************************************************************************************************************************
PLAY RECAP ***************************************************************************************************************************************************************************************
localhost : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
```
On the other hand, when passing "{test: 1}", it failed with the same undefined-variable error.
```
$ ansible-playbook -i localhost, conditional.yml -e "{test: 1}"
PLAY [localhost] *********************************************************************************************************************************************************************************
TASK [add_host] **********************************************************************************************************************************************************************************
ERROR! The conditional check 'test == 2' failed. The error was: error while evaluating conditional (test == 2): 'test' is undefined
```
### pattern 2: using the `debug` module, it works as desired
```
---
- hosts: localhost
gather_facts: false
vars:
test: 1
tasks:
- debug:
msg: "debug message"
failed_when: test == 2
```
It works.
```
$ ansible-playbook -i localhost, debug.yml
PLAY [localhost] *********************************************************************************************************************************************************************************
TASK [debug] *************************************************************************************************************************************************************************************
ok: [localhost] => {
"msg": "debug message"
}
PLAY RECAP ***************************************************************************************************************************************************************************************
localhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
When passed `2`, it failed as expected.
```
$ ansible-playbook -i localhost, debug.yml -e "{test: 2}"
PLAY [localhost] *********************************************************************************************************************************************************************************
TASK [debug] *************************************************************************************************************************************************************************************
fatal: [localhost]: FAILED! => {
"msg": "debug message"
}
PLAY RECAP ***************************************************************************************************************************************************************************************
localhost : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
```
### Expected Results
Pattern 1 should handle the conditional the same way pattern 2 does.
### Actual Results
```console
(already pasted above)
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75971
|
https://github.com/ansible/ansible/pull/71719
|
2749d9fbf9242a59ed87f46ea057d84f4768a93e
|
394d216922d70709248a60f58da300f1e70f5894
| 2021-10-08T06:47:15Z |
python
| 2022-02-04T11:35:23Z |
changelogs/fragments/71627-add_host-group_by-fix-changed_when-in-loop.yml
| |
closed
|
ansible/ansible
|
https://github.com/ansible/ansible
| 75,971 |
add_host module cannot handle a variable in a condition
|
### Summary
When I define a `failed_when:` condition on a task using `add_host`, it causes an error saying the variable is not found.
### Issue Type
Bug Report
### Component Name
task
### Ansible Version
```console
$ ansible --version
ansible [core 2.11.2]
config file = /etc/ansible/ansible.cfg
configured module search path = ['/var/lib/awx/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3.8/site-packages/ansible
ansible collection location = /var/lib/awx/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.8.6 (default, Jan 22 2021, 11:41:28) [GCC 8.4.1 20200928 (Red Hat 8.4.1-1)]
jinja version = 2.10.3
libyaml = True
```
### Configuration
```console
$ ansible-config dump --only-changed
$
(nothing)
```
### OS / Environment
$ cat /etc/redhat-release
Red Hat Enterprise Linux release 8.4 (Ootpa)
### Steps to Reproduce
### pattern 1: using `add_host` module
```
---
- hosts: localhost
gather_facts: false
vars:
test: 1
tasks:
- add_host:
name: "myhost"
groups: "mygroup"
ansible_host: "host46"
ansible_ssh_host: "192.168.100.100"
failed_when: test == 2
```
When I ran the above playbook, it failed with the error below, even though the variable 'test' was defined in the play.
```
$ ansible-playbook -i localhost, conditional.yml
PLAY [localhost] *********************************************************************************************************************************************************************************
TASK [add_host] **********************************************************************************************************************************************************************************
ERROR! The conditional check 'test == 2' failed. The error was: error while evaluating conditional (test == 2): 'test' is undefined
```
If I passed the variable via extra_vars and set it so the condition was true, it worked as expected.
```
$ ansible-playbook -i localhost, conditional.yml -e "{test: 2}"
PLAY [localhost] *********************************************************************************************************************************************************************************
TASK [add_host] **********************************************************************************************************************************************************************************
fatal: [localhost]: FAILED! => {"add_host": {"groups": ["mygroup"], "host_name": "myhost", "host_vars": {"ansible_host": "host46", "ansible_ssh_host": "192.168.100.100"}}, "changed": false, "failed_when_result": true}
NO MORE HOSTS LEFT *******************************************************************************************************************************************************************************
PLAY RECAP ***************************************************************************************************************************************************************************************
localhost : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
```
On the other hand, when passing "{test: 1}", it failed with the same undefined-variable error.
```
$ ansible-playbook -i localhost, conditional.yml -e "{test: 1}"
PLAY [localhost] *********************************************************************************************************************************************************************************
TASK [add_host] **********************************************************************************************************************************************************************************
ERROR! The conditional check 'test == 2' failed. The error was: error while evaluating conditional (test == 2): 'test' is undefined
```
### pattern 2: using the `debug` module, it works as desired
```
---
- hosts: localhost
gather_facts: false
vars:
test: 1
tasks:
- debug:
msg: "debug message"
failed_when: test == 2
```
It works.
```
$ ansible-playbook -i localhost, debug.yml
PLAY [localhost] *********************************************************************************************************************************************************************************
TASK [debug] *************************************************************************************************************************************************************************************
ok: [localhost] => {
"msg": "debug message"
}
PLAY RECAP ***************************************************************************************************************************************************************************************
localhost : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0
```
When passed `2`, it failed as expected.
```
$ ansible-playbook -i localhost, debug.yml -e "{test: 2}"
PLAY [localhost] *********************************************************************************************************************************************************************************
TASK [debug] *************************************************************************************************************************************************************************************
fatal: [localhost]: FAILED! => {
"msg": "debug message"
}
PLAY RECAP ***************************************************************************************************************************************************************************************
localhost : ok=0 changed=0 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
```
### Expected Results
Pattern 1 should handle the conditional the same way pattern 2 does.
### Actual Results
```console
(already pasted above)
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
|
https://github.com/ansible/ansible/issues/75971
|
https://github.com/ansible/ansible/pull/71719
|
2749d9fbf9242a59ed87f46ea057d84f4768a93e
|
394d216922d70709248a60f58da300f1e70f5894
| 2021-10-08T06:47:15Z |
python
| 2022-02-04T11:35:23Z |
lib/ansible/constants.py
|
# Copyright: (c) 2012-2014, Michael DeHaan <[email protected]>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ast import literal_eval
from jinja2 import Template
from string import ascii_letters, digits
from ansible.config.manager import ConfigManager, ensure_type
from ansible.module_utils._text import to_text
from ansible.module_utils.common.collections import Sequence
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE
from ansible.module_utils.six import string_types
from ansible.release import __version__
from ansible.utils.fqcn import add_internal_fqcns
def _warning(msg):
''' Display is not guaranteed here, nor is it necessarily the full class; try anyway, falling back to sys.stderr.write. '''
try:
from ansible.utils.display import Display
Display().warning(msg)
except Exception:
import sys
sys.stderr.write(' [WARNING] %s\n' % (msg))
def _deprecated(msg, version):
''' Display is not guaranteed here, nor is it necessarily the full class; try anyway, falling back to sys.stderr.write. '''
try:
from ansible.utils.display import Display
Display().deprecated(msg, version=version)
except Exception:
import sys
sys.stderr.write(' [DEPRECATED] %s, to be removed in %s\n' % (msg, version))
def set_constant(name, value, export=vars()):
''' sets a constant in the target namespace (this module's globals by default) '''
export[name] = value
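# --- illustrative aside (hypothetical helper, not part of the original module) ---
# `export=vars()` above is evaluated once, at function definition time, binding
# this module's namespace dict; each call therefore writes the constant
# straight into the module's globals. The same binding behaviour in miniature:
def _demo_set(name, value, export={}):  # default dict is bound once, at def time
    export[name] = value
    return export
# _demo_set('ANSWER', 42); _demo_set('GREETING', 'hi')
# -> both calls mutate the same dict: {'ANSWER': 42, 'GREETING': 'hi'}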
class _DeprecatedSequenceConstant(Sequence):
def __init__(self, value, msg, version):
self._value = value
self._msg = msg
self._version = version
def __len__(self):
_deprecated(self._msg, self._version)
return len(self._value)
def __getitem__(self, y):
_deprecated(self._msg, self._version)
return self._value[y]
# CONSTANTS ### yes, actual ones
# The following are hard-coded action names
_ACTION_DEBUG = add_internal_fqcns(('debug', ))
_ACTION_IMPORT_PLAYBOOK = add_internal_fqcns(('import_playbook', ))
_ACTION_IMPORT_ROLE = add_internal_fqcns(('import_role', ))
_ACTION_IMPORT_TASKS = add_internal_fqcns(('import_tasks', ))
_ACTION_INCLUDE = add_internal_fqcns(('include', ))
_ACTION_INCLUDE_ROLE = add_internal_fqcns(('include_role', ))
_ACTION_INCLUDE_TASKS = add_internal_fqcns(('include_tasks', ))
_ACTION_INCLUDE_VARS = add_internal_fqcns(('include_vars', ))
_ACTION_META = add_internal_fqcns(('meta', ))
_ACTION_SET_FACT = add_internal_fqcns(('set_fact', ))
_ACTION_SETUP = add_internal_fqcns(('setup', ))
_ACTION_HAS_CMD = add_internal_fqcns(('command', 'shell', 'script'))
_ACTION_ALLOWS_RAW_ARGS = _ACTION_HAS_CMD + add_internal_fqcns(('raw', ))
_ACTION_ALL_INCLUDES = _ACTION_INCLUDE + _ACTION_INCLUDE_TASKS + _ACTION_INCLUDE_ROLE
_ACTION_ALL_INCLUDE_IMPORT_TASKS = _ACTION_INCLUDE + _ACTION_INCLUDE_TASKS + _ACTION_IMPORT_TASKS
_ACTION_ALL_PROPER_INCLUDE_IMPORT_ROLES = _ACTION_INCLUDE_ROLE + _ACTION_IMPORT_ROLE
_ACTION_ALL_PROPER_INCLUDE_IMPORT_TASKS = _ACTION_INCLUDE_TASKS + _ACTION_IMPORT_TASKS
_ACTION_ALL_INCLUDE_ROLE_TASKS = _ACTION_INCLUDE_ROLE + _ACTION_INCLUDE_TASKS
_ACTION_ALL_INCLUDE_TASKS = _ACTION_INCLUDE + _ACTION_INCLUDE_TASKS
_ACTION_FACT_GATHERING = _ACTION_SETUP + add_internal_fqcns(('gather_facts', ))
_ACTION_WITH_CLEAN_FACTS = _ACTION_SET_FACT + _ACTION_INCLUDE_VARS
# http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/
COLOR_CODES = {
'black': u'0;30', 'bright gray': u'0;37',
'blue': u'0;34', 'white': u'1;37',
'green': u'0;32', 'bright blue': u'1;34',
'cyan': u'0;36', 'bright green': u'1;32',
'red': u'0;31', 'bright cyan': u'1;36',
'purple': u'0;35', 'bright red': u'1;31',
'yellow': u'0;33', 'bright purple': u'1;35',
'dark gray': u'1;30', 'bright yellow': u'1;33',
'magenta': u'0;35', 'bright magenta': u'1;35',
'normal': u'0',
}
REJECT_EXTS = ('.pyc', '.pyo', '.swp', '.bak', '~', '.rpm', '.md', '.txt', '.rst')
BOOL_TRUE = BOOLEANS_TRUE
COLLECTION_PTYPE_COMPAT = {'module': 'modules'}
DEFAULT_BECOME_PASS = None
DEFAULT_PASSWORD_CHARS = to_text(ascii_letters + digits + ".,:-_", errors='strict') # characters included in auto-generated passwords
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
# FIXME: expand to other plugins, but never doc fragments
CONFIGURABLE_PLUGINS = ('become', 'cache', 'callback', 'cliconf', 'connection', 'httpapi', 'inventory', 'lookup', 'netconf', 'shell', 'vars')
# NOTE: always update the docs/docsite/Makefile to match
DOCUMENTABLE_PLUGINS = CONFIGURABLE_PLUGINS + ('module', 'strategy')
IGNORE_FILES = ("COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES") # ignore during module search
INTERNAL_RESULT_KEYS = ('add_host', 'add_group')
LOCALHOST = ('127.0.0.1', 'localhost', '::1')
MODULE_REQUIRE_ARGS = tuple(add_internal_fqcns(('command', 'win_command', 'ansible.windows.win_command', 'shell', 'win_shell',
'ansible.windows.win_shell', 'raw', 'script')))
MODULE_NO_JSON = tuple(add_internal_fqcns(('command', 'win_command', 'ansible.windows.win_command', 'shell', 'win_shell',
'ansible.windows.win_shell', 'raw')))
RESTRICTED_RESULT_KEYS = ('ansible_rsync_path', 'ansible_playbook_python', 'ansible_facts')
TREE_DIR = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0
# This matches a string that cannot be used as a valid python variable name, for example 'not-valid', 'not!valid@either', '1_nor_This'
INVALID_VARIABLE_NAMES = re.compile(r'^[\d\W]|[^\w]')
# FIXME: remove once play_context mangling is removed
# the magic variable mapping dictionary below is used to translate
# host/inventory variables to fields in the PlayContext
# object. The dictionary values are tuples, to account for aliases
# in variable names.
COMMON_CONNECTION_VARS = frozenset(('ansible_connection', 'ansible_host', 'ansible_user', 'ansible_shell_executable',
'ansible_port', 'ansible_pipelining', 'ansible_password', 'ansible_timeout',
'ansible_shell_type', 'ansible_module_compression', 'ansible_private_key_file'))
MAGIC_VARIABLE_MAPPING = dict(
# base
connection=('ansible_connection', ),
module_compression=('ansible_module_compression', ),
shell=('ansible_shell_type', ),
executable=('ansible_shell_executable', ),
# connection common
remote_addr=('ansible_ssh_host', 'ansible_host'),
remote_user=('ansible_ssh_user', 'ansible_user'),
password=('ansible_ssh_pass', 'ansible_password'),
port=('ansible_ssh_port', 'ansible_port'),
pipelining=('ansible_ssh_pipelining', 'ansible_pipelining'),
timeout=('ansible_ssh_timeout', 'ansible_timeout'),
private_key_file=('ansible_ssh_private_key_file', 'ansible_private_key_file'),
# networking modules
network_os=('ansible_network_os', ),
connection_user=('ansible_connection_user',),
# ssh TODO: remove
ssh_executable=('ansible_ssh_executable', ),
ssh_common_args=('ansible_ssh_common_args', ),
sftp_extra_args=('ansible_sftp_extra_args', ),
scp_extra_args=('ansible_scp_extra_args', ),
ssh_extra_args=('ansible_ssh_extra_args', ),
ssh_transfer_method=('ansible_ssh_transfer_method', ),
# docker TODO: remove
docker_extra_args=('ansible_docker_extra_args', ),
# become
become=('ansible_become', ),
become_method=('ansible_become_method', ),
become_user=('ansible_become_user', ),
become_pass=('ansible_become_password', 'ansible_become_pass'),
become_exe=('ansible_become_exe', ),
become_flags=('ansible_become_flags', ),
)
# POPULATE SETTINGS FROM CONFIG ###
config = ConfigManager()
# Generate constants from config
for setting in config.data.get_settings():
value = setting.value
if setting.origin == 'default' and \
isinstance(setting.value, string_types) and \
(setting.value.startswith('{{') and setting.value.endswith('}}')):
try:
t = Template(setting.value)
value = t.render(vars())
try:
value = literal_eval(value)
except ValueError:
pass # not a python data structure
except Exception:
pass # not templatable
value = ensure_type(value, setting.type)
set_constant(setting.name, value)
for warn in config.WARNINGS:
_warning(warn)
|