Refactor yum and dnf, add feature parity (#43621)

* Refactor yum and dnf, add feature parity

Signed-off-by: Adam Miller <admiller@redhat.com>

* remove unnecessary module_utils, move the classes into the module code

Signed-off-by: Adam Miller <admiller@redhat.com>

* remove yum -> yum4, out of scope

Signed-off-by: Adam Miller <admiller@redhat.com>

* use ABCMeta

Signed-off-by: Adam Miller <admiller@redhat.com>

* re-arrange run() caller vs callee

Signed-off-by: Adam Miller <admiller@redhat.com>

* make sanity checks happy

Signed-off-by: Adam Miller <admiller@redhat.com>

* fix yum unit tests

Signed-off-by: Adam Miller <admiller@redhat.com>

* remove unnecessary debug statements, fix typo

Signed-off-by: Adam Miller <admiller@redhat.com>

* fix licensing and attribution in yumdnf module_util

Signed-off-by: Adam Miller <admiller@redhat.com>

* include fix from PR 40737

original commit 5cbda9658a
original Author: Strahinja Kustudic <kustodian@gmail.com>

yum will fail on 'No space left on device', fixes #32791 (#40737)

During the installing of packages if yum runs out of free disk space,
some post install scripts could fail (like e.g. when the kernel
package generates initramfs), but yum would still exit with a status
0.  This is bad, especially for the kernel package, because it makes
it unable to boot.  Because the yum module is usually used for
automation, which means the users cannot read every message yum
prints, it's better that the yum module fails if it detects that
there is no free space on the disk.

Signed-off-by: Adam Miller <admiller@redhat.com>

* Revert "fix licensing and attribution in yumdnf module_util"

This reverts commit 59e11de5a2.

* move fetch_rpm_from_url out of yumdnf module_util

Signed-off-by: Adam Miller <admiller@redhat.com>

* fix the move of fetch_rpm_from_url

Signed-off-by: Adam Miller <admiller@redhat.com>
pull/44323/head
Adam Miller 6 years ago committed by ansibot
parent d7921b4d5b
commit 6d95624c22

@ -0,0 +1,99 @@
# -*- coding: utf-8 -*-
#
# # Copyright: (c) 2012, Red Hat, Inc
# Written by Seth Vidal <skvidal at fedoraproject.org>
# Contributing Authors:
# - Ansible Core Team
# - Eduard Snesarev (@verm666)
# - Berend De Schouwer (@berenddeschouwer)
# - Abhijeet Kasurde (@Akasurde)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import os
import tempfile
from abc import ABCMeta, abstractmethod
from ansible.module_utils._text import to_native
from ansible.module_utils.six import with_metaclass
# Shared AnsibleModule constructor arguments for both the yum and dnf modules.
# Keeping the spec in one place guarantees option/feature parity between the
# two package-manager back-ends; pass it as AnsibleModule(**yumdnf_argument_spec).
yumdnf_argument_spec = dict(
    argument_spec=dict(
        allow_downgrade=dict(type='bool', default=False),
        autoremove=dict(type='bool', default=False),
        bugfix=dict(required=False, type='bool', default=False),
        conf_file=dict(type='str'),
        disable_excludes=dict(type='str', default=None, choices=['all', 'main', 'repoid']),
        disable_gpg_check=dict(type='bool', default=False),
        disable_plugin=dict(type='list', default=[]),
        disablerepo=dict(type='list', default=[]),
        download_only=dict(type='bool', default=False),
        enable_plugin=dict(type='list', default=[]),
        enablerepo=dict(type='list', default=[]),
        exclude=dict(type='list', default=[]),
        installroot=dict(type='str', default="/"),
        install_repoquery=dict(type='bool', default=True),
        list=dict(type='str'),
        name=dict(type='list', aliases=['pkg'], default=[]),
        releasever=dict(default=None),
        security=dict(type='bool', default=False),
        skip_broken=dict(type='bool', default=False),
        # removed==absent, installed==present, these are accepted as aliases
        state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
        update_cache=dict(type='bool', default=False, aliases=['expire-cache']),
        update_only=dict(required=False, default="no", type='bool'),
        validate_certs=dict(type='bool', default=True),
        # this should not be needed, but exists as a failsafe
    ),
    # 'name' and 'list' are mutually exclusive query vs. action modes, but one
    # of them must always be supplied.
    required_one_of=[['name', 'list']],
    mutually_exclusive=[['name', 'list']],
    supports_check_mode=True,
)
class YumDnf(with_metaclass(ABCMeta, object)):
    """
    Abstract class that handles the population of instance variables that should
    be identical between both YUM and DNF modules because of the feature parity
    and shared argument spec (yumdnf_argument_spec).

    Subclasses must implement run(), which performs the actual package
    transaction for their back-end.
    """

    def __init__(self, module):
        self.module = module

        self.allow_downgrade = self.module.params['allow_downgrade']
        self.autoremove = self.module.params['autoremove']
        self.bugfix = self.module.params['bugfix']
        self.conf_file = self.module.params['conf_file']
        self.disable_excludes = self.module.params['disable_excludes']
        self.disable_gpg_check = self.module.params['disable_gpg_check']
        self.disable_plugin = self.module.params['disable_plugin']
        self.disablerepo = self.module.params.get('disablerepo', [])
        self.download_only = self.module.params['download_only']
        self.enable_plugin = self.module.params['enable_plugin']
        self.enablerepo = self.module.params.get('enablerepo', [])
        self.exclude = self.module.params['exclude']
        self.installroot = self.module.params['installroot']
        self.install_repoquery = self.module.params['install_repoquery']
        self.list = self.module.params['list']
        self.names = [p.strip() for p in self.module.params['name']]
        self.releasever = self.module.params['releasever']
        self.security = self.module.params['security']
        self.skip_broken = self.module.params['skip_broken']
        self.state = self.module.params['state']
        self.update_only = self.module.params['update_only']
        self.update_cache = self.module.params['update_cache']
        self.validate_certs = self.module.params['validate_certs']

        # It's possible someone passed a comma separated string since these
        # options used to be string (not list) types, so normalize a lone
        # "a,b,c" element into a real list.
        # NOTE: the comma test must be applied to the single element, not to
        # the list itself -- "',' in ['a,b']" is always False (it checks list
        # membership), which previously made this shim dead code, and the
        # subsequent .split(',') on the list would have raised AttributeError.
        if self.enablerepo and len(self.enablerepo) == 1 and ',' in self.enablerepo[0]:
            self.enablerepo = self.enablerepo[0].split(',')

        if self.disablerepo and len(self.disablerepo) == 1 and ',' in self.disablerepo[0]:
            self.disablerepo = self.disablerepo[0].split(',')

        if self.exclude and len(self.exclude) == 1 and ',' in self.exclude[0]:
            self.exclude = self.exclude[0].split(',')

    @abstractmethod
    def run(self):
        # Concrete back-ends (yum, dnf) perform the package transaction here.
        raise NotImplementedError

@ -3,6 +3,7 @@
# Copyright 2015 Cristian van Ee <cristian at cvee.org> # Copyright 2015 Cristian van Ee <cristian at cvee.org>
# Copyright 2015 Igor Gnatenko <i.gnatenko.brain@gmail.com> # Copyright 2015 Igor Gnatenko <i.gnatenko.brain@gmail.com>
# Copyright 2018 Adam Miller <admiller@redhat.com>
# #
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@ -88,6 +89,95 @@ options:
type: bool type: bool
default: false default: false
version_added: "2.4" version_added: "2.4"
exclude:
description:
- Package name(s) to exclude when state=present, or latest. This can be a
list or a comma separated string.
version_added: "2.7"
skip_broken:
description:
- Skip packages with broken dependencies (depsolve) that are causing problems.
type: bool
default: "no"
version_added: "2.7"
update_cache:
description:
- Force yum to check if cache is out of date and redownload if needed.
Has an effect only if state is I(present) or I(latest).
type: bool
default: "no"
aliases: [ expire-cache ]
version_added: "2.7"
update_only:
description:
- When using latest, only update installed packages. Do not install packages.
- Has an effect only if state is I(latest)
required: false
default: "no"
type: bool
version_added: "2.7"
security:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked security related.
type: bool
default: "no"
version_added: "2.7"
bugfix:
description:
- If set to C(yes), and C(state=latest) then only installs updates that have been marked bugfix related.
required: false
default: "no"
type: bool
version_added: "2.7"
enable_plugin:
description:
- I(Plugin) name to enable for the install/update operation.
The enabled plugin will not persist beyond the transaction.
required: false
version_added: "2.7"
disable_plugin:
description:
- I(Plugin) name to disable for the install/update operation.
The disabled plugins will not persist beyond the transaction.
required: false
version_added: "2.7"
disable_excludes:
description:
- Disable the excludes defined in DNF config files.
- If set to C(all), disables all excludes.
- If set to C(main), disable excludes defined in [main] in yum.conf.
- If set to C(repoid), disable excludes defined for given repo id.
required: false
choices: [ all, main, repoid ]
version_added: "2.7"
validate_certs:
description:
- This only applies if using a https url as the source of the rpm. e.g. for localinstall. If set to C(no), the SSL certificates will not be validated.
- This should only be set to C(no) when used on personally controlled sites using self-signed certificates, as it avoids verifying the source site.
type: bool
default: "yes"
version_added: "2.7"
allow_downgrade:
description:
- This is effectively a no-op in DNF as it is the default behavior of dnf, but is an accepted parameter for feature
parity/compatibility with the I(yum) module.
type: bool
default: False
version_added: "2.7"
install_repoquery:
description:
- This is effectively a no-op in DNF as it is not needed with DNF, but is an accepted parameter for feature
parity/compatibility with the I(yum) module.
type: bool
default: True
version_added: "2.7"
download_only:
description:
- Only download the packages, do not install them.
required: false
default: "no"
type: bool
version_added: "2.7"
notes: notes:
- When used with a `loop:` each package will be processed individually, it is much more efficient to pass the list directly to the `name` option. - When used with a `loop:` each package will be processed individually, it is much more efficient to pass the list directly to the `name` option.
requirements: requirements:
@ -98,6 +188,7 @@ author:
- '"Igor Gnatenko (@ignatenkobrain)" <i.gnatenko.brain@gmail.com>' - '"Igor Gnatenko (@ignatenkobrain)" <i.gnatenko.brain@gmail.com>'
- '"Cristian van Ee (@DJMuggs)" <cristian at cvee.org>' - '"Cristian van Ee (@DJMuggs)" <cristian at cvee.org>'
- "Berend De Schouwer (github.com/berenddeschouwer)" - "Berend De Schouwer (github.com/berenddeschouwer)"
- '"Adam Miller (@maxamillion)" <admiller@redhat.com>"'
''' '''
EXAMPLES = ''' EXAMPLES = '''
@ -147,7 +238,9 @@ EXAMPLES = '''
state: absent state: absent
autoremove: no autoremove: no
''' '''
import os import os
import tempfile
try: try:
import dnf import dnf
@ -160,24 +253,65 @@ try:
except ImportError: except ImportError:
HAS_DNF = False HAS_DNF = False
from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native, to_text
from ansible.module_utils._text import to_native from ansible.module_utils.urls import fetch_url
from ansible.module_utils.six import PY2 from ansible.module_utils.six import PY2
from distutils.version import LooseVersion from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.yumdnf import YumDnf, yumdnf_argument_spec
# 64k. Number of bytes to read at a time when manually downloading pkgs via a url
BUFSIZE = 65536
class DnfModule(YumDnf):
"""
DNF Ansible module back-end implementation
"""
def _ensure_dnf(module): def __init__(self, module):
# This populates instance vars for all argument spec params
super(DnfModule, self).__init__(module)
self._ensure_dnf()
def fetch_rpm_from_url(self, spec):
# FIXME: Remove this once this PR is merged:
# https://github.com/ansible/ansible/pull/19172
# download package so that we can query it
package_name, dummy = os.path.splitext(str(spec.rsplit('/', 1)[1]))
package_file = tempfile.NamedTemporaryFile(dir=self.module.tmpdir, prefix=package_name, suffix='.rpm', delete=False)
self.module.add_cleanup_file(package_file.name)
try:
rsp, info = fetch_url(self.module, spec)
if not rsp:
self.module.fail_json(msg="Failure downloading %s, %s" % (spec, info['msg']))
data = rsp.read(BUFSIZE)
while data:
package_file.write(data)
data = rsp.read(BUFSIZE)
package_file.close()
except Exception as e:
self.module.fail_json(msg="Failure downloading %s, %s" % (spec, to_native(e)))
return package_file.name
def _ensure_dnf(self):
if not HAS_DNF: if not HAS_DNF:
if PY2: if PY2:
package = 'python2-dnf' package = 'python2-dnf'
else: else:
package = 'python3-dnf' package = 'python3-dnf'
if module.check_mode: if self.module.check_mode:
module.fail_json(msg="`{0}` is not installed, but it is required" self.module.fail_json(
"for the Ansible dnf module.".format(package)) msg="`{0}` is not installed, but it is required"
"for the Ansible dnf module.".format(package)
)
module.run_command(['dnf', 'install', '-y', package], check_rc=True) self.module.run_command(['dnf', 'install', '-y', package], check_rc=True)
global dnf global dnf
try: try:
import dnf import dnf
@ -187,12 +321,21 @@ def _ensure_dnf(module):
import dnf.subject import dnf.subject
import dnf.util import dnf.util
except ImportError: except ImportError:
module.fail_json(msg="Could not import the dnf python module. " self.module.fail_json(
"Please install `{0}` package.".format(package)) msg="Could not import the dnf python module. "
"Please install `{0}` package.".format(package)
)
def _configure_base(module, base, conf_file, disable_gpg_check, installroot='/', releasever=None): def _configure_base(self, base, conf_file, disable_gpg_check, installroot='/'):
"""Configure the dnf Base object.""" """Configure the dnf Base object."""
if self.enable_plugin and self.disable_plugin:
base.init_plugins(self.disable_plugin, self.enable_plugin)
elif self.enable_plugin:
base.init_plugins(enable_plugins=self.enable_plugin)
elif self.disable_plugin:
base.init_plugins(self.disable_plugin)
conf = base.conf conf = base.conf
# Turn off debug messages in the output # Turn off debug messages in the output
@ -207,15 +350,30 @@ def _configure_base(module, base, conf_file, disable_gpg_check, installroot='/',
# Set installroot # Set installroot
conf.installroot = installroot conf.installroot = installroot
# Set excludes
if self.exclude:
conf.exclude(self.exclude)
# Set disable_excludes
if self.disable_excludes:
conf.disable_excludes = [self.disable_excludes]
# Set releasever # Set releasever
if releasever is not None: if self.releasever is not None:
conf.substitutions['releasever'] = releasever conf.substitutions['releasever'] = self.releasever
# Set skip_broken (in dnf this is strict=0)
if self.skip_broken:
conf.strict = 0
if self.download_only:
conf.downloadonly = True
# Change the configuration file path if provided # Change the configuration file path if provided
if conf_file: if conf_file:
# Fail if we can't read the configuration file. # Fail if we can't read the configuration file.
if not os.access(conf_file, os.R_OK): if not os.access(conf_file, os.R_OK):
module.fail_json( self.module.fail_json(
msg="cannot read configuration file", conf_file=conf_file) msg="cannot read configuration file", conf_file=conf_file)
else: else:
conf.config_file_path = conf_file conf.config_file_path = conf_file
@ -223,8 +381,7 @@ def _configure_base(module, base, conf_file, disable_gpg_check, installroot='/',
# Read the configuration file # Read the configuration file
conf.read() conf.read()
def _specify_repositories(self, base, disablerepo, enablerepo):
def _specify_repositories(base, disablerepo, enablerepo):
"""Enable and disable repositories matching the provided patterns.""" """Enable and disable repositories matching the provided patterns."""
base.read_all_repos() base.read_all_repos()
repos = base.repos repos = base.repos
@ -239,17 +396,23 @@ def _specify_repositories(base, disablerepo, enablerepo):
for repo in repos.get_matching(repo_pattern): for repo in repos.get_matching(repo_pattern):
repo.enable() repo.enable()
def _base(self, conf_file, disable_gpg_check, disablerepo, enablerepo, installroot):
def _base(module, conf_file, disable_gpg_check, disablerepo, enablerepo, installroot, releasever):
"""Return a fully configured dnf Base object.""" """Return a fully configured dnf Base object."""
base = dnf.Base() base = dnf.Base()
_configure_base(module, base, conf_file, disable_gpg_check, installroot, releasever) self._configure_base(base, conf_file, disable_gpg_check, installroot)
_specify_repositories(base, disablerepo, enablerepo) self._specify_repositories(base, disablerepo, enablerepo)
base.fill_sack(load_system_repo='auto') base.fill_sack(load_system_repo='auto')
if self.bugfix:
key = {'advisory_type__eq': 'bugfix'}
base._update_security_filters = [base.sack.query().filter(**key)]
if self.security:
key = {'advisory_type__eq': 'security'}
base._update_security_filters = [base.sack.query().filter(**key)]
if self.update_cache:
base.update_cache()
return base return base
def _package_dict(self, package):
def _package_dict(package):
"""Return a dictionary of information for the package.""" """Return a dictionary of information for the package."""
# NOTE: This no longer contains the 'dnfstate' field because it is # NOTE: This no longer contains the 'dnfstate' field because it is
# already known based on the query type. # already known based on the query type.
@ -265,8 +428,7 @@ def _package_dict(package):
return result return result
def list_items(self, command):
def list_items(module, base, command):
"""List package info based on the command.""" """List package info based on the command."""
# Rename updates to upgrades # Rename updates to upgrades
if command == 'updates': if command == 'updates':
@ -275,33 +437,33 @@ def list_items(module, base, command):
# Return the corresponding packages # Return the corresponding packages
if command in ['installed', 'upgrades', 'available']: if command in ['installed', 'upgrades', 'available']:
results = [ results = [
_package_dict(package) self._package_dict(package)
for package in getattr(base.sack.query(), command)()] for package in getattr(self.base.sack.query(), command)()]
# Return the enabled repository ids # Return the enabled repository ids
elif command in ['repos', 'repositories']: elif command in ['repos', 'repositories']:
results = [ results = [
{'repoid': repo.id, 'state': 'enabled'} {'repoid': repo.id, 'state': 'enabled'}
for repo in base.repos.iter_enabled()] for repo in self.base.repos.iter_enabled()]
# Return any matching packages # Return any matching packages
else: else:
packages = dnf.subject.Subject(command).get_best_query(base.sack) packages = dnf.subject.Subject(command).get_best_query(self.base.sack)
results = [_package_dict(package) for package in packages] results = [self._package_dict(package) for package in packages]
module.exit_json(results=results) self.module.exit_json(results=results)
def _mark_package_install(self, pkg_spec):
def _mark_package_install(module, base, pkg_spec):
"""Mark the package for install.""" """Mark the package for install."""
try: try:
base.install(pkg_spec) self.base.install(pkg_spec)
except dnf.exceptions.MarkingError: except dnf.exceptions.MarkingError:
module.fail_json(msg="No package {0} available.".format(pkg_spec)) self.module.fail_json(msg="No package {0} available.".format(pkg_spec))
def _parse_spec_group_file(names): def _parse_spec_group_file(self):
pkg_specs, grp_specs, filenames = [], [], [] pkg_specs, grp_specs, filenames = [], [], []
for name in names: for name in self.names:
if name.endswith(".rpm"): if name.endswith(".rpm"):
if '://' in name:
name = self.fetch_rpm_from_url(name)
filenames.append(name) filenames.append(name)
elif name.startswith("@"): elif name.startswith("@"):
grp_specs.append(name[1:]) grp_specs.append(name[1:])
@ -309,19 +471,26 @@ def _parse_spec_group_file(names):
pkg_specs.append(name) pkg_specs.append(name)
return pkg_specs, grp_specs, filenames return pkg_specs, grp_specs, filenames
def _update_only(self, pkgs):
installed = self.base.sack.query().installed()
for pkg in pkgs:
if installed.filter(name=pkg):
self.base.package_upgrade(pkg)
def _install_remote_rpms(base, filenames): def _install_remote_rpms(self, filenames):
if int(dnf.__version__.split(".")[0]) >= 2: if int(dnf.__version__.split(".")[0]) >= 2:
pkgs = list(sorted(base.add_remote_rpms(list(filenames)), reverse=True)) pkgs = list(sorted(self.base.add_remote_rpms(list(filenames)), reverse=True))
else: else:
pkgs = [] pkgs = []
for filename in filenames: for filename in filenames:
pkgs.append(base.add_remote_rpm(filename)) pkgs.append(self.base.add_remote_rpm(filename))
if self.update_only:
self._update_only(pkgs)
else:
for pkg in pkgs: for pkg in pkgs:
base.package_install(pkg) self.base.package_install(pkg)
def ensure(module, base, state, names, autoremove): def ensure(self):
# Accumulate failures. Package management modules install what they can # Accumulate failures. Package management modules install what they can
# and fail with a message about what they can't. # and fail with a message about what they can't.
failures = [] failures = []
@ -329,41 +498,41 @@ def ensure(module, base, state, names, autoremove):
# Autoremove is called alone # Autoremove is called alone
# Jump to remove path where base.autoremove() is run # Jump to remove path where base.autoremove() is run
if not names and autoremove: if not self.names and self.autoremove:
names = [] self.names = []
state = 'absent' self.state = 'absent'
if names == ['*'] and state == 'latest': if self.names == ['*'] and self.state == 'latest':
base.upgrade_all() self.base.upgrade_all()
else: else:
pkg_specs, group_specs, filenames = _parse_spec_group_file(names) pkg_specs, group_specs, filenames = self._parse_spec_group_file()
if group_specs: if group_specs:
base.read_comps() self.base.read_comps()
pkg_specs = [p.strip() for p in pkg_specs] pkg_specs = [p.strip() for p in pkg_specs]
filenames = [f.strip() for f in filenames] filenames = [f.strip() for f in filenames]
groups = [] groups = []
environments = [] environments = []
for group_spec in (g.strip() for g in group_specs): for group_spec in (g.strip() for g in group_specs):
group = base.comps.group_by_pattern(group_spec) group = self.base.comps.group_by_pattern(group_spec)
if group: if group:
groups.append(group.id) groups.append(group.id)
else: else:
environment = base.comps.environment_by_pattern(group_spec) environment = self.base.comps.environment_by_pattern(group_spec)
if environment: if environment:
environments.append(environment.id) environments.append(environment.id)
else: else:
module.fail_json( self.module.fail_json(
msg="No group {0} available.".format(group_spec)) msg="No group {0} available.".format(group_spec))
if state in ['installed', 'present']: if self.state in ['installed', 'present']:
# Install files. # Install files.
_install_remote_rpms(base, filenames) self._install_remote_rpms(filenames)
# Install groups. # Install groups.
for group in groups: for group in groups:
try: try:
base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES) self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.Error as e: except dnf.exceptions.Error as e:
# In dnf 2.0 if all the mandatory packages in a group do # In dnf 2.0 if all the mandatory packages in a group do
# not install, an error is raised. We want to capture # not install, an error is raised. We want to capture
@ -372,166 +541,188 @@ def ensure(module, base, state, names, autoremove):
for environment in environments: for environment in environments:
try: try:
base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES) self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.Error as e: except dnf.exceptions.Error as e:
failures.append((environment, to_native(e))) failures.append((environment, to_native(e)))
# Install packages. # Install packages.
if self.update_only:
self._update_only(pkg_specs)
else:
for pkg_spec in pkg_specs: for pkg_spec in pkg_specs:
_mark_package_install(module, base, pkg_spec) self._mark_package_install(pkg_spec)
elif state == 'latest': elif self.state == 'latest':
# "latest" is same as "installed" for filenames. # "latest" is same as "installed" for filenames.
_install_remote_rpms(base, filenames) self._install_remote_rpms(filenames)
for group in groups: for group in groups:
try: try:
try: try:
base.group_upgrade(group) self.base.group_upgrade(group)
except dnf.exceptions.CompsError: except dnf.exceptions.CompsError:
# If not already installed, try to install. # If not already installed, try to install.
base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES) self.base.group_install(group, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.Error as e: except dnf.exceptions.Error as e:
failures.append((group, to_native(e))) failures.append((group, to_native(e)))
for environment in environments: for environment in environments:
try: try:
try: try:
base.environment_upgrade(environment) self.base.environment_upgrade(environment)
except dnf.exceptions.CompsError: except dnf.exceptions.CompsError:
# If not already installed, try to install. # If not already installed, try to install.
base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES) self.base.environment_install(environment, dnf.const.GROUP_PACKAGE_TYPES)
except dnf.exceptions.Error as e: except dnf.exceptions.Error as e:
failures.append((environment, to_native(e))) failures.append((environment, to_native(e)))
if self.update_only:
self._update_only(pkg_specs)
else:
for pkg_spec in pkg_specs: for pkg_spec in pkg_specs:
# best effort causes to install the latest package # best effort causes to install the latest package
# even if not previously installed # even if not previously installed
base.conf.best = True self.base.conf.best = True
try: try:
base.install(pkg_spec) self.base.install(pkg_spec)
except dnf.exceptions.MarkingError as e: except dnf.exceptions.MarkingError as e:
failures.append((pkg_spec, to_native(e))) failures.append((pkg_spec, to_native(e)))
else: else:
# state == absent # state == absent
if autoremove: if self.autoremove:
base.conf.clean_requirements_on_remove = autoremove self.base.conf.clean_requirements_on_remove = self.autoremove
if filenames: if filenames:
module.fail_json( self.module.fail_json(
msg="Cannot remove paths -- please specify package name.") msg="Cannot remove paths -- please specify package name.")
for group in groups: for group in groups:
try: try:
base.group_remove(group) self.base.group_remove(group)
except dnf.exceptions.CompsError: except dnf.exceptions.CompsError:
# Group is already uninstalled. # Group is already uninstalled.
pass pass
for environment in environments: for environment in environments:
try: try:
base.environment_remove(environment) self.base.environment_remove(environment)
except dnf.exceptions.CompsError: except dnf.exceptions.CompsError:
# Environment is already uninstalled. # Environment is already uninstalled.
pass pass
installed = base.sack.query().installed() installed = self.base.sack.query().installed()
for pkg_spec in pkg_specs: for pkg_spec in pkg_specs:
if installed.filter(name=pkg_spec): if installed.filter(name=pkg_spec):
base.remove(pkg_spec) self.base.remove(pkg_spec)
# Like the dnf CLI we want to allow recursive removal of dependent # Like the dnf CLI we want to allow recursive removal of dependent
# packages # packages
allow_erasing = True allow_erasing = True
if autoremove: if self.autoremove:
base.autoremove() self.base.autoremove()
if not base.resolve(allow_erasing=allow_erasing): if not self.base.resolve(allow_erasing=allow_erasing):
if failures: if failures:
module.fail_json(msg='Failed to install some of the ' self.module.fail_json(
'specified packages', msg='Failed to install some of the specified packages',
failures=failures) failures=failures
module.exit_json(msg="Nothing to do") )
self.module.exit_json(msg="Nothing to do")
else: else:
if module.check_mode: if self.module.check_mode:
if failures: if failures:
module.fail_json(msg='Failed to install some of the ' self.module.fail_json(
'specified packages', msg='Failed to install some of the specified packages',
failures=failures) failures=failures
module.exit_json(changed=True) )
self.module.exit_json(changed=True)
try:
self.base.download_packages(self.base.transaction.install_set)
except dnf.exceptions.DownloadError as e:
self.module.fail_json(msg="Failed to download packages: {0}".format(to_text(e)))
base.download_packages(base.transaction.install_set)
base.do_transaction()
response = {'changed': True, 'results': []} response = {'changed': True, 'results': []}
for package in base.transaction.install_set: if self.download_only:
for package in self.base.transaction.install_set:
response['results'].append("Downloaded: {0}".format(package))
self.module.exit_json(**response)
else:
self.base.do_transaction()
for package in self.base.transaction.install_set:
response['results'].append("Installed: {0}".format(package)) response['results'].append("Installed: {0}".format(package))
for package in base.transaction.remove_set: for package in self.base.transaction.remove_set:
response['results'].append("Removed: {0}".format(package)) response['results'].append("Removed: {0}".format(package))
if failures: if failures:
module.fail_json(msg='Failed to install some of the ' self.module.fail_json(
'specified packages', msg='Failed to install some of the specified packages',
failures=failures) failures=failures
module.exit_json(**response) )
self.module.exit_json(**response)
@staticmethod
def has_dnf():
return HAS_DNF
def main(): def run(self):
"""The main function.""" """The main function."""
module = AnsibleModule(
argument_spec=dict(
name=dict(aliases=['pkg'], type='list'),
state=dict(
choices=['absent', 'present', 'installed', 'removed', 'latest'],
default='present',
),
enablerepo=dict(type='list', default=[]),
disablerepo=dict(type='list', default=[]),
list=dict(),
conf_file=dict(default=None, type='path'),
disable_gpg_check=dict(default=False, type='bool'),
installroot=dict(default='/', type='path'),
autoremove=dict(type='bool', default=False),
releasever=dict(default=None),
),
required_one_of=[['name', 'list', 'autoremove']],
mutually_exclusive=[['name', 'list'], ['autoremove', 'list']],
supports_check_mode=True)
params = module.params
_ensure_dnf(module)
# Check if autoremove is called correctly # Check if autoremove is called correctly
if params['autoremove']: if self.autoremove:
if LooseVersion(dnf.__version__) < LooseVersion('2.0.1'): if LooseVersion(dnf.__version__) < LooseVersion('2.0.1'):
module.fail_json(msg="Autoremove requires dnf>=2.0.1. Current dnf version is %s" % dnf.__version__) self.module.fail_json(msg="Autoremove requires dnf>=2.0.1. Current dnf version is %s" % dnf.__version__)
if params['state'] not in ["absent", None]: if self.state not in ["absent", None]:
module.fail_json(msg="Autoremove should be used alone or with state=absent") self.module.fail_json(msg="Autoremove should be used alone or with state=absent")
# Set state as installed by default # Set state as installed by default
# This is not set in AnsibleModule() because the following shouldn't happend # This is not set in AnsibleModule() because the following shouldn't happend
# - dnf: autoremove=yes state=installed # - dnf: autoremove=yes state=installed
if params['state'] is None: if self.state is None:
params['state'] = 'installed' self.state = 'installed'
if params['list']: if self.list:
base = _base( self.base = self._base(
module, params['conf_file'], params['disable_gpg_check'], self.conf_file, self.disable_gpg_check, self.disablerepo,
params['disablerepo'], params['enablerepo'], params['installroot'], self.enablerepo, self.installroot
params['releasever']) )
list_items(module, base, params['list']) self.list_items(self.module, self.list)
else: else:
# Note: base takes a long time to run so we want to check for failure # Note: base takes a long time to run so we want to check for failure
# before running it. # before running it.
if not dnf.util.am_i_root(): if not dnf.util.am_i_root():
module.fail_json(msg="This command has to be run under the root user.") self.module.fail_json(msg="This command has to be run under the root user.")
base = _base( self.base = self._base(
module, params['conf_file'], params['disable_gpg_check'], self.conf_file, self.disable_gpg_check, self.disablerepo,
params['disablerepo'], params['enablerepo'], params['installroot'], self.enablerepo, self.installroot
params['releasever']) )
self.ensure()
def main():
# state=installed name=pkgspec
# state=removed name=pkgspec
# state=latest name=pkgspec
#
# informational commands:
# list=installed
# list=updates
# list=available
# list=repos
# list=pkgspec
module = AnsibleModule(
**yumdnf_argument_spec
)
ensure(module, base, params['state'], params['name'], params['autoremove']) module_implementation = DnfModule(module)
try:
module_implementation.run()
except dnf.exceptions.RepoError as de:
module.exit_json(msg="Failed to synchronize repodata: {0}".format(de))
if __name__ == '__main__': if __name__ == '__main__':

File diff suppressed because it is too large Load Diff

@ -232,6 +232,37 @@
dnf: name=sos installroot='/' dnf: name=sos installroot='/'
register: dnf_result register: dnf_result
# Test download_only
- name: uninstall sos for downloadonly test
dnf:
name: sos
state: absent
- name: install sos
dnf:
name: sos
state: latest
download_only: true
register: dnf_result
- name: verify download of sos (part 1 -- dnf "install" succeeded)
assert:
that:
- "dnf_result is success"
- "dnf_result is changed"
- name: uninstall sos (noop)
dnf:
name: sos
state: absent
register: dnf_result
- name: verify download of sos (part 2 -- nothing removed during uninstall)
assert:
that:
- "dnf_result is success"
- "not dnf_result is changed"
# GROUP INSTALL # GROUP INSTALL
# Using 'Books and Guides' because it is only 5 packages and a 7.3 M download on Fedora 26. # Using 'Books and Guides' because it is only 5 packages and a 7.3 M download on Fedora 26.
# It also doesn't install anything that will tamper with our Python environment. # It also doesn't install anything that will tamper with our Python environment.
@ -308,7 +339,8 @@
- "'msg' in dnf_result" - "'msg' in dnf_result"
# cleanup until https://github.com/ansible/ansible/issues/27377 is resolved # cleanup until https://github.com/ansible/ansible/issues/27377 is resolved
- shell: dnf -y group install "Books and Guides" && dnf -y group remove "Books and Guides" - shell: 'dnf -y group install "Books and Guides" && dnf -y group remove "Books and Guides"'
register: shell_dnf_result
# GROUP UPGRADE - this will go to the same method as group install # GROUP UPGRADE - this will go to the same method as group install
# but through group_update - it is its invocation we're testing here # but through group_update - it is its invocation we're testing here
@ -426,3 +458,188 @@
- "'non-existent-rpm' in dnf_result['failures'][0]" - "'non-existent-rpm' in dnf_result['failures'][0]"
- "'no package matched' in dnf_result['failures'][0]" - "'no package matched' in dnf_result['failures'][0]"
- "'Failed to install some of the specified packages' in dnf_result['msg']" - "'Failed to install some of the specified packages' in dnf_result['msg']"
- name: use latest to install httpd
dnf:
name: httpd
state: latest
register: dnf_result
- name: verify httpd was installed
assert:
that:
- "'changed' in dnf_result"
- name: uninstall httpd
dnf:
name: httpd
state: removed
- name: update httpd only if it exists
dnf:
name: httpd
state: latest
update_only: yes
register: dnf_result
- name: verify httpd not installed
assert:
that:
- "not dnf_result is changed"
- name: try to install not compatible arch rpm, should fail
dnf:
name: http://download.fedoraproject.org/pub/epel/7/ppc64le/Packages/b/banner-1.3.4-3.el7.ppc64le.rpm
state: present
register: dnf_result
ignore_errors: True
- name: verify that dnf failed
assert:
that:
- "not dnf_result is changed"
- "dnf_result is failed"
# setup for testing installing an RPM from url
- set_fact:
pkg_name: fpaste
- name: cleanup
dnf:
name: "{{ pkg_name }}"
state: absent
- set_fact:
pkg_url: https://download.fedoraproject.org/pub/fedora/linux/releases/27/Everything/x86_64/os/Packages/f/fpaste-0.3.9.1-1.fc27.noarch.rpm
# setup end
- name: download an rpm
get_url:
url: "{{ pkg_url }}"
dest: "/tmp/{{ pkg_name }}.rpm"
- name: install the downloaded rpm
dnf:
name: "/tmp/{{ pkg_name }}.rpm"
state: present
register: dnf_result
- name: verify installation
assert:
that:
- "dnf_result is success"
- "dnf_result is changed"
- name: install the downloaded rpm again
dnf:
name: "/tmp/{{ pkg_name }}.rpm"
state: present
register: dnf_result
- name: verify installation
assert:
that:
- "dnf_result is success"
- "not dnf_result is changed"
- name: clean up
dnf:
name: "{{ pkg_name }}"
state: absent
- name: install from url
dnf:
name: "{{ pkg_url }}"
state: present
register: dnf_result
- name: verify installation
assert:
that:
- "dnf_result is success"
- "dnf_result is changed"
- "dnf_result is not failed"
- name: verify dnf module outputs
assert:
that:
- "'changed' in dnf_result"
- "'results' in dnf_result"
- name: Create a temp RPM file which does not contain nevra information
file:
name: "/tmp/non_existent_pkg.rpm"
state: touch
- name: Try installing RPM file which does not contain nevra information
dnf:
name: "/tmp/non_existent_pkg.rpm"
state: present
register: no_nevra_info_result
ignore_errors: yes
- name: Verify RPM failed to install
assert:
that:
- "'changed' in no_nevra_info_result"
- "'msg' in no_nevra_info_result"
- name: Delete a temp RPM file
file:
name: "/tmp/non_existent_pkg.rpm"
state: absent
- name: uninstall lsof
dnf:
name: lsof
state: removed
- name: check lsof with rpm
shell: rpm -q lsof
ignore_errors: True
register: rpm_lsof_result
- name: verify lsof is uninstalled
assert:
that:
- "rpm_lsof_result is failed"
- name: exclude lsof
lineinfile:
dest: /etc/dnf/dnf.conf
regexp: (^exclude=)(.)*
line: "exclude=lsof*"
state: present
# begin test case where disable_excludes is supported
- name: Try install lsof without disable_excludes
dnf: name=lsof state=latest
register: dnf_lsof_result
ignore_errors: True
- name: verify lsof did not install because it is in exclude list
assert:
that:
- "dnf_lsof_result is failed"
- name: install lsof with disable_excludes
dnf: name=lsof state=latest disable_excludes=all
register: dnf_lsof_result_using_excludes
- name: verify lsof did install using disable_excludes=all
assert:
that:
- "dnf_lsof_result_using_excludes is success"
- "dnf_lsof_result_using_excludes is changed"
- "dnf_lsof_result_using_excludes is not failed"
- name: remove exclude lsof (cleanup dnf.conf)
lineinfile:
dest: /etc/dnf/dnf.conf
regexp: (^exclude=lsof*)
line: "exclude="
state: present
# end test case where disable_excludes is supported

@ -2,7 +2,7 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
from ansible.compat.tests import unittest from ansible.compat.tests import unittest
from ansible.modules.packaging.os import yum from ansible.modules.packaging.os.yum import YumModule
yum_plugin_load_error = """ yum_plugin_load_error = """
@ -141,34 +141,34 @@ class TestYumUpdateCheckParse(unittest.TestCase):
self.assertIsInstance(result, dict) self.assertIsInstance(result, dict)
def test_empty_output(self): def test_empty_output(self):
res = yum.parse_check_update("") res = YumModule.parse_check_update("")
expected_pkgs = [] expected_pkgs = []
self._assert_expected(expected_pkgs, res) self._assert_expected(expected_pkgs, res)
def test_longname(self): def test_longname(self):
res = yum.parse_check_update(longname) res = YumModule.parse_check_update(longname)
expected_pkgs = ['xxxxxxxxxxxxxxxxxxxxxxxxxx', 'glibc'] expected_pkgs = ['xxxxxxxxxxxxxxxxxxxxxxxxxx', 'glibc']
self._assert_expected(expected_pkgs, res) self._assert_expected(expected_pkgs, res)
def test_plugin_load_error(self): def test_plugin_load_error(self):
res = yum.parse_check_update(yum_plugin_load_error) res = YumModule.parse_check_update(yum_plugin_load_error)
expected_pkgs = [] expected_pkgs = []
self._assert_expected(expected_pkgs, res) self._assert_expected(expected_pkgs, res)
def test_wrapped_output_1(self): def test_wrapped_output_1(self):
res = yum.parse_check_update(wrapped_output_1) res = YumModule.parse_check_update(wrapped_output_1)
expected_pkgs = ["vms-agent"] expected_pkgs = ["vms-agent"]
self._assert_expected(expected_pkgs, res) self._assert_expected(expected_pkgs, res)
def test_wrapped_output_2(self): def test_wrapped_output_2(self):
res = yum.parse_check_update(wrapped_output_2) res = YumModule.parse_check_update(wrapped_output_2)
expected_pkgs = ["empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty", expected_pkgs = ["empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty",
"libtiff"] "libtiff"]
self._assert_expected(expected_pkgs, res) self._assert_expected(expected_pkgs, res)
def test_wrapped_output_3(self): def test_wrapped_output_3(self):
res = yum.parse_check_update(wrapped_output_3) res = YumModule.parse_check_update(wrapped_output_3)
expected_pkgs = ["ceph", "ceph-base", "ceph-common", "ceph-mds", expected_pkgs = ["ceph", "ceph-base", "ceph-common", "ceph-mds",
"ceph-mon", "ceph-osd", "ceph-selinux", "libcephfs1", "ceph-mon", "ceph-osd", "ceph-selinux", "libcephfs1",
"librados2", "libradosstriper1", "librbd1", "librgw2", "librados2", "libradosstriper1", "librbd1", "librgw2",
@ -176,16 +176,16 @@ class TestYumUpdateCheckParse(unittest.TestCase):
self._assert_expected(expected_pkgs, res) self._assert_expected(expected_pkgs, res)
def test_wrapped_output_4(self): def test_wrapped_output_4(self):
res = yum.parse_check_update(wrapped_output_4) res = YumModule.parse_check_update(wrapped_output_4)
expected_pkgs = ["ipxe-roms-qemu", "quota", "quota-nls", "rdma", "screen", expected_pkgs = ["ipxe-roms-qemu", "quota", "quota-nls", "rdma", "screen",
"sos", "sssd-client"] "sos", "sssd-client"]
self._assert_expected(expected_pkgs, res) self._assert_expected(expected_pkgs, res)
def test_wrapped_output_rhel7(self): def test_wrapped_output_rhel7(self):
res = yum.parse_check_update(unwrapped_output_rhel7) res = YumModule.parse_check_update(unwrapped_output_rhel7)
self._assert_expected(unwrapped_output_rhel7_expected_pkgs, res) self._assert_expected(unwrapped_output_rhel7_expected_pkgs, res)
def test_wrapped_output_rhel7_obsoletes(self): def test_wrapped_output_rhel7_obsoletes(self):
res = yum.parse_check_update(unwrapped_output_rhel7_obsoletes) res = YumModule.parse_check_update(unwrapped_output_rhel7_obsoletes)
self._assert_expected(unwrapped_output_rhel7_expected_pkgs, res) self._assert_expected(unwrapped_output_rhel7_expected_pkgs, res)

Loading…
Cancel
Save