Refactor yum and dnf, add feature parity (#43621)

* Refactor yum and dnf, add feature parity

Signed-off-by: Adam Miller <admiller@redhat.com>

* remove unnecessary module_utils, move the classes into the module code

Signed-off-by: Adam Miller <admiller@redhat.com>

* remove yum -> yum4, out of scope

Signed-off-by: Adam Miller <admiller@redhat.com>

* use ABCMeta

Signed-off-by: Adam Miller <admiller@redhat.com>

* re-arrange run() caller vs callee

Signed-off-by: Adam Miller <admiller@redhat.com>

* make sanity checks happy

Signed-off-by: Adam Miller <admiller@redhat.com>

* fix yum unit tests

Signed-off-by: Adam Miller <admiller@redhat.com>

* remove unnecessary debug statements, fix typo

Signed-off-by: Adam Miller <admiller@redhat.com>

* fix licensing and attribution in yumdnf module_util

Signed-off-by: Adam Miller <admiller@redhat.com>

* include fix from PR 40737

original commit 5cbda9658a
original Author: Strahinja Kustudic <kustodian@gmail.com>

yum will fail on 'No space left on device', fixes #32791 (#40737)

During the installing of packages if yum runs out of free disk space,
some post install scripts could fail (like e.g. when the kernel
package generates initramfs), but yum would still exit with a status
0.  This is bad, especially for the kernel package, because it makes
it unable to boot.  Because the yum module is usually used for
automation, which means the users cannot read every message yum
prints, it's better that the yum module fails if it detects that
there is no free space on the disk.

Signed-off-by: Adam Miller <admiller@redhat.com>

* Revert "fix licensing and attribution in yumdnf module_util"

This reverts commit 59e11de5a2.

* move fetch_rpm_from_url out of yumdnf module_util

Signed-off-by: Adam Miller <admiller@redhat.com>

* fix the move of fetch_rpm_from_url

Signed-off-by: Adam Miller <admiller@redhat.com>
pull/44323/head
Adam Miller 6 years ago committed by ansibot
parent d7921b4d5b
commit 6d95624c22

@ -0,0 +1,99 @@
# -*- coding: utf-8 -*-
#
# # Copyright: (c) 2012, Red Hat, Inc
# Written by Seth Vidal <skvidal at fedoraproject.org>
# Contributing Authors:
# - Ansible Core Team
# - Eduard Snesarev (@verm666)
# - Berend De Schouwer (@berenddeschouwer)
# - Abhijeet Kasurde (@Akasurde)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import os
import tempfile
from abc import ABCMeta, abstractmethod
from ansible.module_utils._text import to_native
from ansible.module_utils.six import with_metaclass
# Shared AnsibleModule() keyword arguments for the yum and dnf modules so the
# two modules expose an identical user-facing option set (feature parity).
# Each entry of argument_spec maps a module option name to its type/default.
yumdnf_argument_spec = dict(
    argument_spec=dict(
        allow_downgrade=dict(type='bool', default=False),
        autoremove=dict(type='bool', default=False),
        bugfix=dict(required=False, type='bool', default=False),
        conf_file=dict(type='str'),
        disable_excludes=dict(type='str', default=None, choices=['all', 'main', 'repoid']),
        disable_gpg_check=dict(type='bool', default=False),
        disable_plugin=dict(type='list', default=[]),
        disablerepo=dict(type='list', default=[]),
        download_only=dict(type='bool', default=False),
        enable_plugin=dict(type='list', default=[]),
        enablerepo=dict(type='list', default=[]),
        exclude=dict(type='list', default=[]),
        installroot=dict(type='str', default="/"),
        install_repoquery=dict(type='bool', default=True),
        # 'list' shadows the builtin name but is the long-standing public
        # option name for the informational query mode.
        list=dict(type='str'),
        name=dict(type='list', aliases=['pkg'], default=[]),
        releasever=dict(default=None),
        security=dict(type='bool', default=False),
        skip_broken=dict(type='bool', default=False),
        # removed==absent, installed==present, these are accepted as aliases
        state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
        update_cache=dict(type='bool', default=False, aliases=['expire-cache']),
        update_only=dict(required=False, default="no", type='bool'),
        validate_certs=dict(type='bool', default=True),
        # this should not be needed, but exists as a failsafe
    ),
    required_one_of=[['name', 'list']],
    mutually_exclusive=[['name', 'list']],
    supports_check_mode=True,
)
class YumDnf(with_metaclass(ABCMeta, object)):
    """
    Abstract base class that populates the instance variables that should be
    identical between the YUM and DNF modules, which share one argument spec
    (feature parity).

    Subclasses must implement run().
    """

    def __init__(self, module):
        self.module = module

        self.allow_downgrade = self.module.params['allow_downgrade']
        self.autoremove = self.module.params['autoremove']
        self.bugfix = self.module.params['bugfix']
        self.conf_file = self.module.params['conf_file']
        self.disable_excludes = self.module.params['disable_excludes']
        self.disable_gpg_check = self.module.params['disable_gpg_check']
        self.disable_plugin = self.module.params['disable_plugin']
        self.disablerepo = self.module.params.get('disablerepo', [])
        self.download_only = self.module.params['download_only']
        self.enable_plugin = self.module.params['enable_plugin']
        self.enablerepo = self.module.params.get('enablerepo', [])
        self.exclude = self.module.params['exclude']
        self.installroot = self.module.params['installroot']
        self.install_repoquery = self.module.params['install_repoquery']
        self.list = self.module.params['list']
        # Strip stray whitespace users sometimes leave around package names.
        self.names = [p.strip() for p in self.module.params['name']]
        self.releasever = self.module.params['releasever']
        self.security = self.module.params['security']
        self.skip_broken = self.module.params['skip_broken']
        self.state = self.module.params['state']
        self.update_only = self.module.params['update_only']
        self.update_cache = self.module.params['update_cache']
        self.validate_certs = self.module.params['validate_certs']

        # These options used to be string typed, so someone may still pass a
        # single comma separated string; split that into a real list.
        # NOTE: the previous check, `',' in self.enablerepo`, tested list
        # *membership* of the string ',' (always False for ['a,b']) so the
        # split never ran — and had it run, .split() was called on the list
        # itself, which would have raised AttributeError.  Inspect and split
        # the lone element instead.
        self.enablerepo = self._split_comma_separated(self.enablerepo)
        self.disablerepo = self._split_comma_separated(self.disablerepo)
        self.exclude = self._split_comma_separated(self.exclude)

    @staticmethod
    def _split_comma_separated(values):
        """Return *values*, splitting a lone 'a,b,c' style element apart."""
        if values and len(values) == 1 and ',' in values[0]:
            return values[0].split(',')
        return values

    @abstractmethod
    def run(self):
        raise NotImplementedError

@ -3,6 +3,7 @@
# Copyright 2015 Cristian van Ee <cristian at cvee.org> # Copyright 2015 Cristian van Ee <cristian at cvee.org>
# Copyright 2015 Igor Gnatenko <i.gnatenko.brain@gmail.com> # Copyright 2015 Igor Gnatenko <i.gnatenko.brain@gmail.com>
# Copyright 2018 Adam Miller <admiller@redhat.com>
# #
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@ -88,6 +89,95 @@ options:
type: bool type: bool
default: false default: false
version_added: "2.4" version_added: "2.4"
exclude:
description:
- Package name(s) to exclude when state=present, or latest. This can be a
list or a comma separated string.
version_added: "2.7"
skip_broken:
description:
- Skip packages with broken dependencies (depsolve) that are causing problems.
type: bool
default: "no"
version_added: "2.7"
update_cache:
description:
- Force yum to check if cache is out of date and redownload if needed.
Has an effect only if state is I(present) or I(latest).
type: bool
default: "no"
aliases: [ expire-cache ]
version_added: "2.7"
update_only:
description:
- When using latest, only update installed packages. Do not install packages.
- Has an effect only if state is I(latest)
required: false
default: "no"
type: bool
version_added: "2.7"
security:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked security related.
type: bool
default: "no"
version_added: "2.7"
bugfix:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked bugfix related.
required: false
default: "no"
type: bool
version_added: "2.7"
enable_plugin:
description:
- I(Plugin) name to enable for the install/update operation.
The enabled plugin will not persist beyond the transaction.
required: false
version_added: "2.7"
disable_plugin:
description:
- I(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyond the transaction.
required: false
version_added: "2.7"
disable_excludes:
description:
- Disable the excludes defined in DNF config files.
- If set to C(all), disables all excludes.
- If set to C(main), disable excludes defined in [main] in yum.conf.
- If set to C(repoid), disable excludes defined for given repo id.
required: false
choices: [ all, main, repoid ]
version_added: "2.7"
validate_certs:
description:
- This only applies if using a https url as the source of the rpm. e.g. for localinstall. If set to C(no), the SSL certificates will not be validated.
- This should only be set to C(no) when used on personally controlled sites using self-signed certificates as it avoids verifying the source site.
type: bool
default: "yes"
version_added: "2.7"
allow_downgrade:
description:
- This is effectively a no-op in DNF as it is the default behavior of dnf, but is an accepted parameter for feature
parity/compatibility with the I(yum) module.
type: bool
default: False
version_added: "2.7"
install_repoquery:
description:
- This is effectively a no-op in DNF as it is not needed with DNF, but is an accepted parameter for feature
parity/compatibility with the I(yum) module.
type: bool
default: True
version_added: "2.7"
download_only:
description:
- Only download the packages, do not install them.
required: false
default: "no"
type: bool
version_added: "2.7"
notes: notes:
- When used with a `loop:` each package will be processed individually, it is much more efficient to pass the list directly to the `name` option. - When used with a `loop:` each package will be processed individually, it is much more efficient to pass the list directly to the `name` option.
requirements: requirements:
@ -98,6 +188,7 @@ author:
- '"Igor Gnatenko (@ignatenkobrain)" <i.gnatenko.brain@gmail.com>' - '"Igor Gnatenko (@ignatenkobrain)" <i.gnatenko.brain@gmail.com>'
- '"Cristian van Ee (@DJMuggs)" <cristian at cvee.org>' - '"Cristian van Ee (@DJMuggs)" <cristian at cvee.org>'
- "Berend De Schouwer (github.com/berenddeschouwer)" - "Berend De Schouwer (github.com/berenddeschouwer)"
- '"Adam Miller (@maxamillion)" <admiller@redhat.com>"'
''' '''
EXAMPLES = ''' EXAMPLES = '''
@ -147,7 +238,9 @@ EXAMPLES = '''
state: absent state: absent
autoremove: no autoremove: no
''' '''
import os import os
import tempfile
try: try:
import dnf import dnf
@ -160,378 +253,476 @@ try:
except ImportError: except ImportError:
HAS_DNF = False HAS_DNF = False
from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native, to_text
from ansible.module_utils._text import to_native from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six import PY2 from ansible.module_utils.six import PY2
from distutils.version import LooseVersion from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
# 64k. Number of bytes to read at a time when manually downloading pkgs via a url
BUFSIZE = 65536
class DnfModule(YumDnf):
"""
DNF Ansible module back-end implementation
"""
def _ensure_dnf(module): def __init__(self, module):
if not HAS_DNF: # This populates instance vars for all argument spec params
if PY2: super(DnfModule, self).__init__(module)
package = 'python2-dnf'
self._ensure_dnf()
def fetch_rpm_from_url(self, spec):
# FIXME: Remove this once this PR is merged:
# https://github.com/ansible/ansible/pull/19172
# download package so that we can query it
package_name, dummy = os.path.splitext(str(spec.rsplit('/', 1)[1]))
package_file = tempfile.NamedTemporaryFile(dir=self.module.tmpdir, prefix=package_name, suffix='.rpm', delete=False)
self.module.add_cleanup_file(package_file.name)
try:
rsp, info = fetch_url(self.module, spec)
if not rsp:
self.module.fail_json(msg="Failure downloading %s, %s" % (spec, info['msg']))
data = rsp.read(BUFSIZE)
while data:
package_file.write(data)
data = rsp.read(BUFSIZE)
package_file.close()
except Exception as e:
self.module.fail_json(msg="Failure downloading %s, %s" % (spec, to_native(e)))
return package_file.name
def _ensure_dnf(self):
if not HAS_DNF:
if PY2:
package = 'python2-dnf'
else:
package = 'python3-dnf'
if self.module.check_mode:
self.module.fail_json(
msg="`{0}` is not installed, but it is required"
"for the Ansible dnf module.".format(package)
)
self.module.run_command(['dnf', 'install', '-y', package], check_rc=True)
global dnf
try:
import dnf
import dnf.cli
import dnf.const
import dnf.exceptions
import dnf.subject
import dnf.util
except ImportError:
self.module.fail_json(
msg="Could not import the dnf python module. "
"Please install `{0}` package.".format(package)
)
def _configure_base(self, base, conf_file, disable_gpg_check, installroot='/'):
"""Configure the dnf Base object."""
if self.enable_plugin and self.disable_plugin:
base.init_plugins(self.disable_plugin, self.enable_plugin)
elif self.enable_plugin:
base.init_plugins(enable_plugins=self.enable_plugin)
elif self.disable_plugin:
base.init_plugins(self.disable_plugin)
conf = base.conf
# Turn off debug messages in the output
conf.debuglevel = 0
# Set whether to check gpg signatures
conf.gpgcheck = not disable_gpg_check
# Don't prompt for user confirmations
conf.assumeyes = True
# Set installroot
conf.installroot = installroot
# Set excludes
if self.exclude:
conf.exclude(self.exclude)
# Set disable_excludes
if self.disable_excludes:
conf.disable_excludes = [self.disable_excludes]
# Set releasever
if self.releasever is not None:
conf.substitutions['releasever'] = self.releasever
# Set skip_broken (in dnf this is strict=0)
if self.skip_broken:
conf.strict = 0
if self.download_only:
conf.downloadonly = True
# Change the configuration file path if provided
if conf_file:
# Fail if we can't read the configuration file.
if not os.access(conf_file, os.R_OK):
self.module.fail_json(
msg="cannot read configuration file", conf_file=conf_file)
else:
conf.config_file_path = conf_file
# Read the configuration file
conf.read()
def _specify_repositories(self, base, disablerepo, enablerepo):
"""Enable and disable repositories matching the provided patterns."""
base.read_all_repos()
repos = base.repos
# Disable repositories
for repo_pattern in disablerepo:
for repo in repos.get_matching(repo_pattern):
repo.disable()
# Enable repositories
for repo_pattern in enablerepo:
for repo in repos.get_matching(repo_pattern):
repo.enable()
def _base(self, conf_file, disable_gpg_check, disablerepo, enablerepo, installroot):
"""Return a fully configured dnf Base object."""
base = dnf.Base()
self._configure_base(base, conf_file, disable_gpg_check, installroot)
self._specify_repositories(base, disablerepo, enablerepo)
base.fill_sack(load_system_repo='auto')
if self.bugfix:
key = {'advisory_type__eq': 'bugfix'}
base._update_security_filters = [base.sack.query().filter(**key)]
if self.security:
key = {'advisory_type__eq': 'security'}
base._update_security_filters = [base.sack.query().filter(**key)]
if self.update_cache:
base.update_cache()
return base
def _package_dict(self, package):
"""Return a dictionary of information for the package."""
# NOTE: This no longer contains the 'dnfstate' field because it is
# already known based on the query type.
result = {
'name': package.name,
'arch': package.arch,
'epoch': str(package.epoch),
'release': package.release,
'version': package.version,
'repo': package.repoid}
result['nevra'] = '{epoch}:{name}-{version}-{release}.{arch}'.format(
**result)
return result
def list_items(self, command):
"""List package info based on the command."""
# Rename updates to upgrades
if command == 'updates':
command = 'upgrades'
# Return the corresponding packages
if command in ['installed', 'upgrades', 'available']:
results = [
self._package_dict(package)
for package in getattr(self.base.sack.query(), command)()]
# Return the enabled repository ids
elif command in ['repos', 'repositories']:
results = [
{'repoid': repo.id, 'state': 'enabled'}
for repo in self.base.repos.iter_enabled()]
# Return any matching packages
else: else:
package = 'python3-dnf' packages = dnf.subject.Subject(command).get_best_query(self.base.sack)
results = [self._package_dict(package) for package in packages]
if module.check_mode: self.module.exit_json(results=results)
module.fail_json(msg="`{0}` is not installed, but it is required"
"for the Ansible dnf module.".format(package))
module.run_command(['dnf', 'install', '-y', package], check_rc=True) def _mark_package_install(self, pkg_spec):
global dnf """Mark the package for install."""
try: try:
import dnf self.base.install(pkg_spec)
import dnf.cli except dnf.exceptions.MarkingError:
import dnf.const self.module.fail_json(msg="No package {0} available.".format(pkg_spec))
import dnf.exceptions
import dnf.subject def _parse_spec_group_file(self):
import dnf.util pkg_specs, grp_specs, filenames = [], [], []
except ImportError: for name in self.names:
module.fail_json(msg="Could not import the dnf python module. " if name.endswith(".rpm"):
"Please install `{0}` package.".format(package)) if '://' in name:
name = self.fetch_rpm_from_url(name)
filenames.append(name)
def _configure_base(module, base, conf_file, disable_gpg_check, installroot='/', releasever=None): elif name.startswith("@"):
"""Configure the dnf Base object.""" grp_specs.append(name[1:])
conf = base.conf else:
pkg_specs.append(name)
# Turn off debug messages in the output return pkg_specs, grp_specs, filenames
conf.debuglevel = 0
def _update_only(self, pkgs):
# Set whether to check gpg signatures installed = self.base.sack.query().installed()
conf.gpgcheck = not disable_gpg_check for pkg in pkgs:
if installed.filter(name=pkg):
# Don't prompt for user confirmations self.base.package_upgrade(pkg)
conf.assumeyes = True
def _install_remote_rpms(self, filenames):
# Set installroot if int(dnf.__version__.split(".")[0]) >= 2:
conf.installroot = installroot pkgs = list(sorted(self.base.add_remote_rpms(list(filenames)), reverse=True))
# Set releasever
if releasever is not None:
conf.substitutions['releasever'] = releasever
# Change the configuration file path if provided
if conf_file:
# Fail if we can't read the configuration file.
if not os.access(conf_file, os.R_OK):
module.fail_json(
msg="cannot read configuration file", conf_file=conf_file)
else: else:
conf.config_file_path = conf_file pkgs = []
for filename in filenames:
# Read the configuration file pkgs.append(self.base.add_remote_rpm(filename))
conf.read() if self.update_only:
self._update_only(pkgs)
def _specify_repositories(base, disablerepo, enablerepo):
"""Enable and disable repositories matching the provided patterns."""
base.read_all_repos()
repos = base.repos
# Disable repositories
for repo_pattern in disablerepo:
for repo in repos.get_matching(repo_pattern):
repo.disable()
# Enable repositories
for repo_pattern in enablerepo:
for repo in repos.get_matching(repo_pattern):
repo.enable()
def _base(module, conf_file, disable_gpg_check, disablerepo, enablerepo, installroot, releasever):
"""Return a fully configured dnf Base object."""
base = dnf.Base()
_configure_base(module, base, conf_file, disable_gpg_check, installroot, releasever)
_specify_repositories(base, disablerepo, enablerepo)
base.fill_sack(load_system_repo='auto')
return base
def _package_dict(package):
"""Return a dictionary of information for the package."""
# NOTE: This no longer contains the 'dnfstate' field because it is
# already known based on the query type.
result = {
'name': package.name,
'arch': package.arch,
'epoch': str(package.epoch),
'release': package.release,
'version': package.version,
'repo': package.repoid}
result['nevra'] = '{epoch}:{name}-{version}-{release}.{arch}'.format(
**result)
return result
def list_items(module, base, command):
"""List package info based on the command."""
# Rename updates to upgrades
if command == 'updates':
command = 'upgrades'
# Return the corresponding packages
if command in ['installed', 'upgrades', 'available']:
results = [
_package_dict(package)
for package in getattr(base.sack.query(), command)()]
# Return the enabled repository ids
elif command in ['repos', 'repositories']:
results = [
{'repoid': repo.id, 'state': 'enabled'}
for repo in base.repos.iter_enabled()]
# Return any matching packages
else:
packages = dnf.subject.Subject(command).get_best_query(base.sack)
results = [_package_dict(package) for package in packages]
module.exit_json(results=results)
def _mark_package_install(module, base, pkg_spec):
"""Mark the package for install."""
try:
base.install(pkg_spec)
except dnf.exceptions.MarkingError:
module.fail_json(msg="No package {0} available.".format(pkg_spec))
def _parse_spec_group_file(names):
pkg_specs, grp_specs, filenames = [], [], []
for name in names:
if name.endswith(".rpm"):
filenames.append(name)
elif name.startswith("@"):
grp_specs.append(name[1:])
else: else:
pkg_specs.append(name) for pkg in pkgs:
return pkg_specs, grp_specs, filenames self.base.package_install(pkg)
def ensure(self):
def _install_remote_rpms(base, filenames): # Accumulate failures. Package management modules install what they can
if int(dnf.__version__.split(".")[0]) >= 2: # and fail with a message about what they can't.
pkgs = list(sorted(base.add_remote_rpms(list(filenames)), reverse=True)) failures = []
else: allow_erasing = False
pkgs = []
for filename in filenames: # Autoremove is called alone
pkgs.append(base.add_remote_rpm(filename)) # Jump to remove path where base.autoremove() is run
for pkg in pkgs: if not self.names and self.autoremove:
base.package_install(pkg) self.names = []
self.state = 'absent'
def ensure(module, base, state, names, autoremove): if self.names == ['*'] and self.state == 'latest':
# Accumulate failures. Package management modules install what they can self.base.upgrade_all()
# and fail with a message about what they can't. else:
failures = [] pkg_specs, group_specs, filenames = self._parse_spec_group_file()
allow_erasing = False if group_specs:
self.base.read_comps()
# Autoremove is called alone
# Jump to remove path where base.autoremove() is run pkg_specs = [p.strip() for p in pkg_specs]
if not names and autoremove: filenames = [f.strip() for f in filenames]
names = [] groups = []
state = 'absent' environments = []
for group_spec in (g.strip() for g in group_specs):
if names == ['*'] and state == 'latest': group = self.base.comps.group_by_pattern(group_spec)
base.upgrade_all() if group:
else: groups.append(group.id)
pkg_specs, group_specs, filenames = _parse_spec_group_file(names) else:
if group_specs: environment = self.base.comps.environment_by_pattern(group_spec)
base.read_comps() if environment:
environments.append(environment.id)
pkg_specs = [p.strip() for p in pkg_specs] else:
filenames = [f.strip() for f in filenames] self.module.fail_json(
groups = [] msg="No group {0} available.".format(group_spec))
environments = []
for group_spec in (g.strip() for g in group_specs): if self.state in ['installed', 'present']:
group = base.comps.group_by_pattern(group_spec) # Install files.
if group: self._install_remote_rpms(filenames)
groups.append(group.id)
else: # Install groups.
environment = base.comps.environment_by_pattern(group_spec) for group in groups:
if environment: try:
environments.append(environment.id) self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.Error as e:
# In dnf 2.0 if all the mandatory packages in a group do
# not install, an error is raised. We want to capture
# this but still install as much as possible.
failures.append((group, to_native(e)))
for environment in environments:
try:
self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.Error as e:
failures.append((environment, to_native(e)))
# Install packages.
if self.update_only:
self._update_only(pkg_specs)
else:
for pkg_spec in pkg_specs:
self._mark_package_install(pkg_spec)
elif self.state == 'latest':
# "latest" is same as "installed" for filenames.
self._install_remote_rpms(filenames)
for group in groups:
try:
try:
self.base.group_upgrade(group)
except dnf.exceptions.CompsError:
# If not already installed, try to install.
self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.Error as e:
failures.append((group, to_native(e)))
for environment in environments:
try:
try:
self.base.environment_upgrade(environment)
except dnf.exceptions.CompsError:
# If not already installed, try to install.
self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.Error as e:
failures.append((environment, to_native(e)))
if self.update_only:
self._update_only(pkg_specs)
else: else:
module.fail_json( for pkg_spec in pkg_specs:
msg="No group {0} available.".format(group_spec)) # best effort causes to install the latest package
# even if not previously installed
if state in ['installed', 'present']: self.base.conf.best = True
# Install files. try:
_install_remote_rpms(base, filenames) self.base.install(pkg_spec)
except dnf.exceptions.MarkingError as e:
# Install groups. failures.append((pkg_spec, to_native(e)))
for group in groups:
try: else:
base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES) # state == absent
except dnf.exceptions.Error as e: if self.autoremove:
# In dnf 2.0 if all the mandatory packages in a group do self.base.conf.clean_requirements_on_remove = self.autoremove
# not install, an error is raised. We want to capture
# this but still install as much as possible. if filenames:
failures.append((group, to_native(e))) self.module.fail_json(
msg="Cannot remove paths -- please specify package name.")
for environment in environments:
try: for group in groups:
base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.Error as e:
failures.append((environment, to_native(e)))
# Install packages.
for pkg_spec in pkg_specs:
_mark_package_install(module, base, pkg_spec)
elif state == 'latest':
# "latest" is same as "installed" for filenames.
_install_remote_rpms(base, filenames)
for group in groups:
try:
try: try:
base.group_upgrade(group) self.base.group_remove(group)
except dnf.exceptions.CompsError: except dnf.exceptions.CompsError:
# If not already installed, try to install. # Group is already uninstalled.
base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES) pass
except dnf.exceptions.Error as e:
failures.append((group, to_native(e)))
for environment in environments: for environment in environments:
try:
try: try:
base.environment_upgrade(environment) self.base.environment_remove(environment)
except dnf.exceptions.CompsError: except dnf.exceptions.CompsError:
# If not already installed, try to install. # Environment is already uninstalled.
base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES) pass
except dnf.exceptions.Error as e:
failures.append((environment, to_native(e))) installed = self.base.sack.query().installed()
for pkg_spec in pkg_specs:
for pkg_spec in pkg_specs: if installed.filter(name=pkg_spec):
# best effort causes to install the latest package self.base.remove(pkg_spec)
# even if not previously installed
base.conf.best = True # Like the dnf CLI we want to allow recursive removal of dependent
try: # packages
base.install(pkg_spec) allow_erasing = True
except dnf.exceptions.MarkingError as e:
failures.append((pkg_spec, to_native(e))) if self.autoremove:
self.base.autoremove()
if not self.base.resolve(allow_erasing=allow_erasing):
if failures:
self.module.fail_json(
msg='Failed to install some of the specified packages',
failures=failures
)
self.module.exit_json(msg="Nothing to do")
else: else:
# state == absent if self.module.check_mode:
if autoremove: if failures:
base.conf.clean_requirements_on_remove = autoremove self.module.fail_json(
msg='Failed to install some of the specified packages',
if filenames: failures=failures
module.fail_json( )
msg="Cannot remove paths -- please specify package name.") self.module.exit_json(changed=True)
for group in groups: try:
try: self.base.download_packages(self.base.transaction.install_set)
base.group_remove(group) except dnf.exceptions.DownloadError as e:
except dnf.exceptions.CompsError: self.module.fail_json(msg="Failed to download packages: {0}".format(to_text(e)))
# Group is already uninstalled.
pass response = {'changed': True, 'results': []}
if self.download_only:
for environment in environments: for package in self.base.transaction.install_set:
try: response['results'].append("Downloaded: {0}".format(package))
base.environment_remove(environment) self.module.exit_json(**response)
except dnf.exceptions.CompsError: else:
# Environment is already uninstalled. self.base.do_transaction()
pass for package in self.base.transaction.install_set:
response['results'].append("Installed: {0}".format(package))
installed = base.sack.query().installed() for package in self.base.transaction.remove_set:
for pkg_spec in pkg_specs: response['results'].append("Removed: {0}".format(package))
if installed.filter(name=pkg_spec):
base.remove(pkg_spec)
# Like the dnf CLI we want to allow recursive removal of dependent
# packages
allow_erasing = True
if autoremove:
base.autoremove()
if not base.resolve(allow_erasing=allow_erasing):
if failures:
module.fail_json(msg='Failed to install some of the '
'specified packages',
failures=failures)
module.exit_json(msg="Nothing to do")
else:
if module.check_mode:
if failures: if failures:
module.fail_json(msg='Failed to install some of the ' self.module.fail_json(
'specified packages', msg='Failed to install some of the specified packages',
failures=failures) failures=failures
module.exit_json(changed=True) )
self.module.exit_json(**response)
base.download_packages(base.transaction.install_set)
base.do_transaction() @staticmethod
response = {'changed': True, 'results': []} def has_dnf():
for package in base.transaction.install_set: return HAS_DNF
response['results'].append("Installed: {0}".format(package))
for package in base.transaction.remove_set: def run(self):
response['results'].append("Removed: {0}".format(package)) """The main function."""
if failures: # Check if autoremove is called correctly
module.fail_json(msg='Failed to install some of the ' if self.autoremove:
'specified packages', if LooseVersion(dnf.__version__) < LooseVersion('2.0.1'):
failures=failures) self.module.fail_json(msg="Autoremove requires dnf>=2.0.1. Current dnf version is %s" % dnf.__version__)
module.exit_json(**response) if self.state not in ["absent", None]:
self.module.fail_json(msg="Autoremove should be used alone or with state=absent")
# Set state as installed by default
# This is not set in AnsibleModule() because the following shouldn't happen
# - dnf: autoremove=yes state=installed
if self.state is None:
self.state = 'installed'
if self.list:
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
self.enablerepo, self.installroot
)
self.list_items(self.module, self.list)
else:
# Note: base takes a long time to run so we want to check for failure
# before running it.
if not dnf.util.am_i_root():
self.module.fail_json(msg="This command has to be run under the root user.")
self.base = self._base(
self.conf_file, self.disable_gpg_check, self.disablerepo,
self.enablerepo, self.installroot
)
self.ensure()
def main():
    """Entry point: build the AnsibleModule and delegate to DnfModule.

    Supported invocations:
    # state=installed name=pkgspec
    # state=removed name=pkgspec
    # state=latest name=pkgspec
    #
    # informational commands:
    #   list=installed
    #   list=updates
    #   list=available
    #   list=repos
    #   list=pkgspec
    """
    module = AnsibleModule(
        **yumdnf_argument_spec
    )

    module_implementation = DnfModule(module)
    try:
        module_implementation.run()
    except dnf.exceptions.RepoError as de:
        # A repodata synchronization failure means no package operation
        # happened; report it as a failure (exit_json here would wrongly
        # report success to the caller).
        module.fail_json(msg="Failed to synchronize repodata: {0}".format(de))


if __name__ == '__main__':
    main()

File diff suppressed because it is too large Load Diff

@ -232,6 +232,37 @@
dnf: name=sos installroot='/' dnf: name=sos installroot='/'
register: dnf_result register: dnf_result
# Test download_only: the install must report changed (packages were
# downloaded) without actually installing, so the follow-up removal is a noop.
- name: uninstall sos for downloadonly test
  dnf:
    name: sos
    state: absent

- name: install sos
  dnf:
    name: sos
    state: latest
    download_only: true
  register: dnf_result

- name: verify download of sos (part 1 -- dnf "install" succeeded)
  assert:
    that:
      - "dnf_result is success"
      - "dnf_result is changed"

# Nothing should have been installed, so removing sos must change nothing.
- name: uninstall sos (noop)
  dnf:
    name: sos
    state: absent
  register: dnf_result

- name: verify download of sos (part 2 -- nothing removed during uninstall)
  assert:
    that:
      - "dnf_result is success"
      - "not dnf_result is changed"
# GROUP INSTALL # GROUP INSTALL
# Using 'Books and Guides' because it is only 5 packages and a 7.3 M download on Fedora 26. # Using 'Books and Guides' because it is only 5 packages and a 7.3 M download on Fedora 26.
# It also doesn't install anything that will tamper with our Python environment. # It also doesn't install anything that will tamper with our Python environment.
@ -308,7 +339,8 @@
- "'msg' in dnf_result" - "'msg' in dnf_result"
# cleanup until https://github.com/ansible/ansible/issues/27377 is resolved # cleanup until https://github.com/ansible/ansible/issues/27377 is resolved
- shell: dnf -y group install "Books and Guides" && dnf -y group remove "Books and Guides" - shell: 'dnf -y group install "Books and Guides" && dnf -y group remove "Books and Guides"'
register: shell_dnf_result
# GROUP UPGRADE - this will go to the same method as group install # GROUP UPGRADE - this will go to the same method as group install
# but through group_update - it is its invocation we're testing here # but through group_update - it is its invocation we're testing here
@ -426,3 +458,188 @@
- "'non-existent-rpm' in dnf_result['failures'][0]" - "'non-existent-rpm' in dnf_result['failures'][0]"
- "'no package matched' in dnf_result['failures'][0]" - "'no package matched' in dnf_result['failures'][0]"
- "'Failed to install some of the specified packages' in dnf_result['msg']" - "'Failed to install some of the specified packages' in dnf_result['msg']"
# state=latest on an absent package installs it.
- name: use latest to install httpd
  dnf:
    name: httpd
    state: latest
  register: dnf_result

- name: verify httpd was installed
  assert:
    that:
      - "'changed' in dnf_result"

- name: uninstall httpd
  dnf:
    name: httpd
    state: removed

# update_only=yes must NOT install a package that is not already present.
- name: update httpd only if it exists
  dnf:
    name: httpd
    state: latest
    update_only: yes
  register: dnf_result

- name: verify httpd not installed
  assert:
    that:
      - "not dnf_result is changed"

# Installing an RPM built for an incompatible architecture must fail cleanly.
- name: try to install not compatible arch rpm, should fail
  dnf:
    name: http://download.fedoraproject.org/pub/epel/7/ppc64le/Packages/b/banner-1.3.4-3.el7.ppc64le.rpm
    state: present
  register: dnf_result
  ignore_errors: True

- name: verify that dnf failed
  assert:
    that:
      - "not dnf_result is changed"
      - "dnf_result is failed"
# setup for testing installing an RPM from url
- set_fact:
    pkg_name: fpaste

- name: cleanup
  dnf:
    name: "{{ pkg_name }}"
    state: absent

- set_fact:
    pkg_url: https://download.fedoraproject.org/pub/fedora/linux/releases/27/Everything/x86_64/os/Packages/f/fpaste-0.3.9.1-1.fc27.noarch.rpm
# setup end

# Case 1: install a local RPM file previously fetched with get_url.
- name: download an rpm
  get_url:
    url: "{{ pkg_url }}"
    dest: "/tmp/{{ pkg_name }}.rpm"

- name: install the downloaded rpm
  dnf:
    name: "/tmp/{{ pkg_name }}.rpm"
    state: present
  register: dnf_result

- name: verify installation
  assert:
    that:
      - "dnf_result is success"
      - "dnf_result is changed"

# Installing the same local RPM again must be idempotent (no change).
- name: install the downloaded rpm again
  dnf:
    name: "/tmp/{{ pkg_name }}.rpm"
    state: present
  register: dnf_result

- name: verify installation
  assert:
    that:
      - "dnf_result is success"
      - "not dnf_result is changed"

- name: clean up
  dnf:
    name: "{{ pkg_name }}"
    state: absent

# Case 2: install directly from the remote URL (no prior download).
- name: install from url
  dnf:
    name: "{{ pkg_url }}"
    state: present
  register: dnf_result

- name: verify installation
  assert:
    that:
      - "dnf_result is success"
      - "dnf_result is changed"
      - "dnf_result is not failed"

- name: verify dnf module outputs
  assert:
    that:
      - "'changed' in dnf_result"
      - "'results' in dnf_result"
# An empty file is not a valid RPM (no NEVRA header): installation must fail
# gracefully with a 'msg' rather than crash.
- name: Create a temp RPM file which does not contain nevra information
  file:
    name: "/tmp/non_existent_pkg.rpm"
    state: touch

- name: Try installing RPM file which does not contain nevra information
  dnf:
    name: "/tmp/non_existent_pkg.rpm"
    state: present
  register: no_nevra_info_result
  ignore_errors: yes

- name: Verify RPM failed to install
  assert:
    that:
      - "'changed' in no_nevra_info_result"
      - "'msg' in no_nevra_info_result"

- name: Delete a temp RPM file
  file:
    name: "/tmp/non_existent_pkg.rpm"
    state: absent
# Make sure lsof is absent before exercising the exclude list.
- name: uninstall lsof
  dnf:
    name: lsof
    state: removed

- name: check lsof with rpm
  shell: rpm -q lsof
  ignore_errors: True
  register: rpm_lsof_result

- name: verify lsof is uninstalled
  assert:
    that:
      - "rpm_lsof_result is failed"

# Add lsof to dnf's exclude list so a plain install of it is refused.
- name: exclude lsof
  lineinfile:
    dest: /etc/dnf/dnf.conf
    regexp: (^exclude=)(.)*
    line: "exclude=lsof*"
    state: present

# begin test case where disable_excludes is supported
- name: Try install lsof without disable_excludes
  dnf: name=lsof state=latest
  register: dnf_lsof_result
  ignore_errors: True

- name: verify lsof did not install because it is in exclude list
  assert:
    that:
      - "dnf_lsof_result is failed"

# disable_excludes=all bypasses the exclude list, so this must succeed.
- name: install lsof with disable_excludes
  dnf: name=lsof state=latest disable_excludes=all
  register: dnf_lsof_result_using_excludes

- name: verify lsof did install using disable_excludes=all
  assert:
    that:
      - "dnf_lsof_result_using_excludes is success"
      - "dnf_lsof_result_using_excludes is changed"
      - "dnf_lsof_result_using_excludes is not failed"

- name: remove exclude lsof (cleanup dnf.conf)
  lineinfile:
    dest: /etc/dnf/dnf.conf
    regexp: (^exclude=lsof*)
    line: "exclude="
    state: present
# end test case where disable_excludes is supported

@ -2,7 +2,7 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from ansible.compat.tests import unittest from ansible.compat.tests import unittest
from ansible.modules.packaging.os import yum from ansible.modules.packaging.os.yum import YumModule
yum_plugin_load_error = """ yum_plugin_load_error = """
@ -141,34 +141,34 @@ class TestYumUpdateCheckParse(unittest.TestCase):
self.assertIsInstance(result, dict) self.assertIsInstance(result, dict)
def test_empty_output(self): def test_empty_output(self):
res = yum.parse_check_update("") res = YumModule.parse_check_update("")
expected_pkgs = [] expected_pkgs = []
self._assert_expected(expected_pkgs, res) self._assert_expected(expected_pkgs, res)
def test_longname(self): def test_longname(self):
res = yum.parse_check_update(longname) res = YumModule.parse_check_update(longname)
expected_pkgs = ['xxxxxxxxxxxxxxxxxxxxxxxxxx', 'glibc'] expected_pkgs = ['xxxxxxxxxxxxxxxxxxxxxxxxxx', 'glibc']
self._assert_expected(expected_pkgs, res) self._assert_expected(expected_pkgs, res)
def test_plugin_load_error(self): def test_plugin_load_error(self):
res = yum.parse_check_update(yum_plugin_load_error) res = YumModule.parse_check_update(yum_plugin_load_error)
expected_pkgs = [] expected_pkgs = []
self._assert_expected(expected_pkgs, res) self._assert_expected(expected_pkgs, res)
def test_wrapped_output_1(self): def test_wrapped_output_1(self):
res = yum.parse_check_update(wrapped_output_1) res = YumModule.parse_check_update(wrapped_output_1)
expected_pkgs = ["vms-agent"] expected_pkgs = ["vms-agent"]
self._assert_expected(expected_pkgs, res) self._assert_expected(expected_pkgs, res)
def test_wrapped_output_2(self): def test_wrapped_output_2(self):
res = yum.parse_check_update(wrapped_output_2) res = YumModule.parse_check_update(wrapped_output_2)
expected_pkgs = ["empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty", expected_pkgs = ["empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty",
"libtiff"] "libtiff"]
self._assert_expected(expected_pkgs, res) self._assert_expected(expected_pkgs, res)
def test_wrapped_output_3(self): def test_wrapped_output_3(self):
res = yum.parse_check_update(wrapped_output_3) res = YumModule.parse_check_update(wrapped_output_3)
expected_pkgs = ["ceph", "ceph-base", "ceph-common", "ceph-mds", expected_pkgs = ["ceph", "ceph-base", "ceph-common", "ceph-mds",
"ceph-mon", "ceph-osd", "ceph-selinux", "libcephfs1", "ceph-mon", "ceph-osd", "ceph-selinux", "libcephfs1",
"librados2", "libradosstriper1", "librbd1", "librgw2", "librados2", "libradosstriper1", "librbd1", "librgw2",
@ -176,16 +176,16 @@ class TestYumUpdateCheckParse(unittest.TestCase):
self._assert_expected(expected_pkgs, res) self._assert_expected(expected_pkgs, res)
def test_wrapped_output_4(self): def test_wrapped_output_4(self):
res = yum.parse_check_update(wrapped_output_4) res = YumModule.parse_check_update(wrapped_output_4)
expected_pkgs = ["ipxe-roms-qemu", "quota", "quota-nls", "rdma", "screen", expected_pkgs = ["ipxe-roms-qemu", "quota", "quota-nls", "rdma", "screen",
"sos", "sssd-client"] "sos", "sssd-client"]
self._assert_expected(expected_pkgs, res) self._assert_expected(expected_pkgs, res)
def test_wrapped_output_rhel7(self): def test_wrapped_output_rhel7(self):
res = yum.parse_check_update(unwrapped_output_rhel7) res = YumModule.parse_check_update(unwrapped_output_rhel7)
self._assert_expected(unwrapped_output_rhel7_expected_pkgs, res) self._assert_expected(unwrapped_output_rhel7_expected_pkgs, res)
def test_wrapped_output_rhel7_obsoletes(self): def test_wrapped_output_rhel7_obsoletes(self):
res = yum.parse_check_update(unwrapped_output_rhel7_obsoletes) res = YumModule.parse_check_update(unwrapped_output_rhel7_obsoletes)
self._assert_expected(unwrapped_output_rhel7_expected_pkgs, res) self._assert_expected(unwrapped_output_rhel7_expected_pkgs, res)

Loading…
Cancel
Save