ZFS: New PEHelperMode support, add OpenZFS 2.0 workaround

main
svalouch 3 years ago
parent 42b6447665
commit 89a7a98470

@ -0,0 +1,29 @@
# Changelog
All notable changes to this project will be documented in this file.
## Release 0.0.4 - Unreleased
## Release 0.0.3 - Unreleased
**Features**
- The privilege escalation (PE) helper can now run proactively in some situations. Previously it could only be enabled or disabled (`use_pe_helper = True/False`); to accommodate the new behavior, a `PEHelperMode` enum has been introduced. The old `False` corresponds to `PEHelperMode.DO_NOT_USE`, and both `PEHelperMode.USE_IF_REQUIRED` and `PEHelperMode.USE_PROACTIVE` replace the old `True`, with the latter performing the action or parts of it (such as unmounting before destroying) before the actual action is attempted (see the usage sketch below). The old parameters and properties are slated for removal in the next release.
- Support privilege escalation when removing a fileset on OpenZFS 2.0+, where the error message no longer indicates that it is a permission problem.
- In some situations, privilege escalation can be performed proactively: when selected, the pe_helper is called right away instead of attempting the action, failing, analyzing the error message and only then calling the helper.
**Deprecated features**
- `use_pe_helper` has been deprecated in favor of `pe_helper_mode`. Using the old parameter or property generates a `DeprecationWarning` and, where appropriate, sets `pe_helper_mode` to `PEHelperMode.DO_NOT_USE` (for `False`) or `PEHelperMode.USE_IF_REQUIRED` (for `True`). The parameter and property will be removed in the next version (`0.0.4`).
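A minimal usage sketch of the new interface, based on the constructor shown in this commit (constructing the `ZFS` base class directly is only for illustration; a PE helper still has to be set for any mode to take effect):

```python
import warnings

from simplezfs.types import PEHelperMode
from simplezfs.zfs import ZFS

# New style: select the mode explicitly.
zfs = ZFS(pe_helper_mode=PEHelperMode.USE_IF_REQUIRED)

# Old style: still accepted in 0.0.3, but emits a DeprecationWarning and is
# mapped to PEHelperMode.USE_IF_REQUIRED internally.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    legacy = ZFS(use_pe_helper=True)
assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```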
**Tooling**
- Fix pytest deprecation warning
## Release 0.0.2
It spawned like this out of thin air.

@ -6,7 +6,7 @@ with open('README.rst', 'rt') as fh:
setup( setup(
name='simplezfs', name='simplezfs',
version='0.0.2', version='0.0.3',
author='Stefan Valouch', author='Stefan Valouch',
author_email='svalouch@valouch.com', author_email='svalouch@valouch.com',
description='Simple, low-level ZFS API', description='Simple, low-level ZFS API',
@ -54,5 +54,6 @@ setup(
'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
], ],
) )

@ -163,12 +163,12 @@ class SudoPEHelper(PEHelperBase):
args = [self.__exe, '-n'] + cmd args = [self.__exe, '-n'] + cmd
if len(args) < 4: # "sudo -n zfs mount fileset" is the shortest that makes sense to use with sudo if len(args) < 4: # "sudo -n zfs mount fileset" is the shortest that makes sense to use with sudo
raise PEHelperException('Command suspiciously short') raise PEHelperException('Command suspiciously short')
self.log.debug(f'About to run: {args}') self.log.debug('About to run: %s', args)
proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8') proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
if proc.returncode != 0 or len(proc.stderr) > 0: if proc.returncode != 0 or len(proc.stderr) > 0:
raise PEHelperException(f'Error running command {" ".join(args)}: {proc.stderr}') raise PEHelperException(f'Error running command {" ".join(args)}: {proc.stderr}')
self.log.debug(f'pe helper command successful. stout: {proc.stdout}') self.log.debug('pe helper command successful. stdout: %s', proc.stdout)
def zfs_mount(self, fileset: str) -> None: def zfs_mount(self, fileset: str) -> None:
if '/' in fileset: if '/' in fileset:

@ -95,6 +95,18 @@ class Dataset(NamedTuple):
return Dataset(name=ds_name, parent=ds_parent, type=ds_type, full_path=value, pool=ds_pool) return Dataset(name=ds_name, parent=ds_parent, type=ds_type, full_path=value, pool=ds_pool)
class PEHelperMode(Enum):
'''
Modes for choosing whether to use the PEHelper and how.
'''
#: Do not use the PEHelper
DO_NOT_USE = 0
#: Use if ZFS indicates it is required
USE_IF_REQUIRED = 1
#: Use proactively for actions known to require it
USE_PROACTIVE = 2
@unique @unique
class PropertySource(str, Enum): class PropertySource(str, Enum):
''' '''

@ -4,6 +4,7 @@ ZFS frontend API
''' '''
import logging import logging
import warnings
from typing import Dict, List, Optional, Union from typing import Dict, List, Optional, Union
from .exceptions import ( from .exceptions import (
@ -14,7 +15,7 @@ from .exceptions import (
ValidationError, ValidationError,
) )
from .pe_helper import PEHelperBase from .pe_helper import PEHelperBase
from .types import Dataset, DatasetType, Property from .types import Dataset, DatasetType, PEHelperMode, Property
from .validation import ( from .validation import (
validate_dataset_path, validate_dataset_path,
validate_metadata_property_name, validate_metadata_property_name,
@ -55,19 +56,32 @@ class ZFS:
The parameter ``use_pe_helper`` is used to control whether the ``pe_helper`` will be used when performing actions The parameter ``use_pe_helper`` is used to control whether the ``pe_helper`` will be used when performing actions
that require elevated permissions. It can be changed at any time using the ``use_pe_helper`` property. that require elevated permissions. It can be changed at any time using the ``use_pe_helper`` property.
.. versionchanged:: 0.0.3
``use_pe_helper`` became ``pe_helper_mode``
:param metadata_namespace: Default namespace :param metadata_namespace: Default namespace
:param pe_helper: Privilege escalation (PE) helper to use for actions that require elevated privileges (root). :param pe_helper: Privilege escalation (PE) helper to use for actions that require elevated privileges (root).
:param use_pe_helper: Whether to use the PE helper for creating and (u)mounting. :param pe_helper_mode: How and when to use the PEHelper. Defaults to not using it at all.
:param kwargs: Extra arguments, ignored :param kwargs: Extra arguments, ignored
''' '''
def __init__(self, *, metadata_namespace: Optional[str] = None, pe_helper: Optional[PEHelperBase] = None, def __init__(self, *, metadata_namespace: Optional[str] = None, pe_helper: Optional[PEHelperBase] = None,
use_pe_helper: bool = False, **kwargs) -> None: pe_helper_mode: PEHelperMode = PEHelperMode.DO_NOT_USE, **kwargs) -> None:
self.metadata_namespace = metadata_namespace self.metadata_namespace = metadata_namespace
self.pe_helper = pe_helper self.pe_helper = pe_helper
self.use_pe_helper = use_pe_helper self.pe_helper_mode = pe_helper_mode
# TODO remove this in 0.0.4
if 'use_pe_helper' in kwargs:
msg = 'Parameter "use_pe_helper" is deprecated in favor of "pe_helper_mode" and will be ignored from ' \
'version 0.0.4 onwards.'
if bool(kwargs['use_pe_helper']) and pe_helper_mode == PEHelperMode.DO_NOT_USE:
msg += ' Setting pe_helper_mode=PEHelperMode.USE_IF_REQUIRED to restore previous behavior.'
self.pe_helper_mode = PEHelperMode.USE_IF_REQUIRED
warnings.warn(msg, DeprecationWarning)
def __repr__(self) -> str: def __repr__(self) -> str:
return f'<ZFS(pe_helper="{self._pe_helper}", use_pe_helper="{self._use_pe_helper}")>' return f'<ZFS(pe_helper="{self._pe_helper}", pe_helper_mode="{self._pe_helper_mode}")>'
@property @property
def metadata_namespace(self) -> Optional[str]: def metadata_namespace(self) -> Optional[str]:
@ -96,27 +110,70 @@ class ZFS:
def pe_helper(self, helper: Optional[PEHelperBase]) -> None: def pe_helper(self, helper: Optional[PEHelperBase]) -> None:
''' '''
Sets the privilege escalation (PE) helper. Supply ``None`` to unset it. Sets the privilege escalation (PE) helper. Supply ``None`` to unset it.
:raises FileNotFoundError: if the script can't be found or is not executable.
''' '''
if helper is None: if helper is None:
log.debug('PE helper is None') log.debug('PE helper is None')
self._pe_helper = helper self._pe_helper = helper
# TODO remove this in 0.0.4
@property @property
def use_pe_helper(self) -> bool: def use_pe_helper(self) -> bool:
''' '''
Returns whether the privilege escalation (PE) helper should be used. If the helper has not been set, this Returns whether the privilege escalation (PE) helper should be used. If the helper has not been set, this
property evaluates to ``False``. property evaluates to ``False``.
.. deprecated:: 0.0.3
Use :func:`~simplezfs.zfs.pe_helper_mode` instead. This property evaluates to ``True`` if the helper is set
and the mode is **not** ``PEHelperMode.DO_NOT_USE``.
This property will be removed in 0.0.4!
''' '''
return self._pe_helper is not None and self._use_pe_helper warnings.warn('Property "use_pe_helper" is deprecated in favor of "pe_helper_mode" and will be removed in '
'0.0.4', DeprecationWarning)
return self._pe_helper is not None and self._pe_helper_mode != PEHelperMode.DO_NOT_USE
# TODO remove this in 0.0.4
@use_pe_helper.setter @use_pe_helper.setter
def use_pe_helper(self, use: bool) -> None: def use_pe_helper(self, use: bool) -> None:
''' '''
Enable or disable using the privilege escalation (PE) helper. Enable or disable using the privilege escalation (PE) helper.
.. deprecated:: 0.0.3
Use :func:`~simplezfs.zfs.pe_helper_mode` instead. Setting ``False`` sets the mode to
``PEHelperMode.DO_NOT_USE``; ``True`` sets it to ``PEHelperMode.USE_IF_REQUIRED`` unless it is already set to
``PEHelperMode.USE_PROACTIVE``, in which case nothing changes.
This property will be removed in 0.0.4!
'''
if use:
if self.pe_helper_mode == PEHelperMode.DO_NOT_USE:
self.pe_helper_mode = PEHelperMode.USE_IF_REQUIRED
else:
self.pe_helper_mode = PEHelperMode.DO_NOT_USE
warnings.warn('Property "use_pe_helper" is deprecated in favor of "pe_helper_mode" and will be removed in '
'0.0.4', DeprecationWarning)
@property
def pe_helper_mode(self) -> PEHelperMode:
'''
Returns whether the privilege escalation (PE) helper should be used and when. If the helper has not been set,
this property evaluates to ``PEHelperMode.DO_NOT_USE``.
.. versionadded:: 0.0.3
'''
if self._pe_helper is None:
return PEHelperMode.DO_NOT_USE
return self._pe_helper_mode
@pe_helper_mode.setter
def pe_helper_mode(self, mode: PEHelperMode) -> None:
'''
Sets the privilege escalation (PE) helper mode.
.. versionadded:: 0.0.3
''' '''
self._use_pe_helper = use self._pe_helper_mode = mode
def dataset_exists(self, name: str) -> bool: def dataset_exists(self, name: str) -> bool:
''' '''
@ -155,41 +212,51 @@ class ZFS:
''' '''
raise NotImplementedError(f'{self} has not implemented this function') raise NotImplementedError(f'{self} has not implemented this function')
def set_mountpoint(self, fileset: str, mountpoint: str, *, use_pe_helper: Optional[bool] = None) -> None: def set_mountpoint(self, fileset: str, mountpoint: str, *, pe_helper_mode: Optional[PEHelperMode] = None) -> None:
''' '''
Sets or changes the mountpoint property of a fileset. While this can be achieved using the generic function Sets or changes the mountpoint property of a fileset. While this can be achieved using the generic function
:func:`~ZFS.set_property`, it allows for using the privilege escalation (PE) helper if so desired. :func:`~ZFS.set_property`, it allows for using the privilege escalation (PE) helper if so desired.
The argument ``use_pe_helper`` can overwrite the property of the same name. If the argument is None, the The argument ``pe_helper_mode`` can override the instance-wide setting of the same name. If the argument is None, the
properties value will be assumed. In any case, the function attempts to set the property on its own first. If the instance setting is used. Unless the mode is ``PEHelperMode.USE_PROACTIVE`` (in which case the PE helper is called right away), the function attempts to set the property on its own first. If
that fails, it evaluates if the PE helper should be used, and will error out if it should be used but has not that fails, it evaluates whether the PE helper should be used, and errors out if it should be used but has not
been set. If the helper fails, a :class:`~simplezfs.exceptions.PEHelperException` is raised. been set. If the helper fails, a :class:`~simplezfs.exceptions.PEHelperException` is raised.
:param fileset: The fileset to modify. :param fileset: The fileset to modify.
:param mountpoint: The new value for the ``mountpoint`` property. :param mountpoint: The new value for the ``mountpoint`` property.
:param use_pe_helper: Overwrite the default for using the privilege escalation (PE) helper for this task. :param pe_helper_mode: Overwrite the default for using the privilege escalation (PE) helper for this task.
``None`` (default) uses the default setting. If the helper is not set, it is not used. ``None`` (default) uses the default setting. If the helper is not set, it is not used.
:raises DatasetNotFound: if the fileset could not be found. :raises DatasetNotFound: if the fileset could not be found.
:raises ValidationError: if validating the parameters failed. :raises ValidationError: if validating the parameters failed.
''' '''
if '/' not in fileset:
validate_pool_name(fileset)
else:
validate_dataset_path(fileset)
validate_property_value(mountpoint)
ds_type = self.get_property(fileset, 'type') ds_type = self.get_property(fileset, 'type')
if ds_type != 'filesystem': if ds_type != 'filesystem':
raise ValidationError(f'Dataset is not a filesystem and can\'t have its mountpoint set') raise ValidationError('Dataset is not a filesystem and can\'t have its mountpoint set')
try: real_pe_helper_mode = pe_helper_mode if pe_helper_mode is not None else self.pe_helper_mode
self.set_property(fileset, 'mountpoint', mountpoint) if self.pe_helper is not None and real_pe_helper_mode == PEHelperMode.USE_PROACTIVE:
except PermissionError as exc: log.info('Proactively calling PE helper for setting the mountpoint for "%s"', fileset)
if self.pe_helper is not None: self.pe_helper.zfs_set_mountpoint(fileset, mountpoint)
real_use_pe_helper = use_pe_helper if use_pe_helper is not None else self.use_pe_helper else:
try:
if real_use_pe_helper: self.set_property(fileset, 'mountpoint', mountpoint)
log.info(f'Permission error when setting mountpoint for "{fileset}", retrying using PE helper') except PermissionError as exc:
self.pe_helper.zfs_set_mountpoint(fileset, mountpoint) if self.pe_helper is not None:
if real_pe_helper_mode == PEHelperMode.USE_IF_REQUIRED:
log.info(f'Permission error when setting mountpoint for "{fileset}", retrying using PE helper')
self.pe_helper.zfs_set_mountpoint(fileset, mountpoint)
else:
log.error(f'Permission error when setting mountpoint for "{fileset}" and not using PE helper')
raise exc
else: else:
log.error(f'Permission error when setting mountpoint for "{fileset}" and not using PE helper') log.error(f'Permission error when setting mountpoint for "{fileset}" and PE helper is not set')
raise exc
else:
log.error(f'Permission error when setting mountpoint for "{fileset}" and PE helper is not set')
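For illustration, a hedged sketch of the per-call override described in the docstring above. The import paths, the constructor arguments of `ZFSCli` and `SudoPEHelper`, and the pool, fileset and mountpoint names are assumptions about the runtime environment, not something this diff guarantees:

```python
from simplezfs.pe_helper import SudoPEHelper
from simplezfs.types import PEHelperMode
from simplezfs.zfs_cli import ZFSCli

# Instance-wide default: fall back to the helper only when ZFS reports a
# permission problem. SudoPEHelper() is assumed to locate sudo on its own.
zfs = ZFSCli(pe_helper=SudoPEHelper(), pe_helper_mode=PEHelperMode.USE_IF_REQUIRED)

# Per-call override: call the PE helper right away for this one operation.
# 'tank/data' and '/srv/data' are placeholders for a real fileset and mountpoint.
zfs.set_mountpoint('tank/data', '/srv/data', pe_helper_mode=PEHelperMode.USE_PROACTIVE)
```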
def set_property(self, dataset: str, key: str, value: str, *, metadata: bool = False, def set_property(self, dataset: str, key: str, value: str, *, metadata: bool = False,
overwrite_metadata_namespace: Optional[str] = None) -> None: overwrite_metadata_namespace: Optional[str] = None) -> None:
@ -463,7 +530,7 @@ class ZFS:
''' '''
if blocksize is not None: if blocksize is not None:
if properties is None: if properties is None:
properties = dict() properties = {}
properties['blocksize'] = f'{blocksize}' properties['blocksize'] = f'{blocksize}'
return self.create_dataset( return self.create_dataset(
name, name,
@ -544,7 +611,7 @@ class ZFS:
validate_native_property_name(k) validate_native_property_name(k)
validate_property_value(val) validate_property_value(val)
_metadata_properties = dict() # type: Dict[str, str] _metadata_properties: Dict[str, str] = {}
if metadata_properties is not None: if metadata_properties is not None:
for k, val in metadata_properties.items(): for k, val in metadata_properties.items():
# if the name has no namespace, add the default one if set # if the name has no namespace, add the default one if set
@ -557,14 +624,14 @@ class ZFS:
_metadata_properties[meta_name] = metadata_properties[k] _metadata_properties[meta_name] = metadata_properties[k]
validate_metadata_property_name(meta_name) validate_metadata_property_name(meta_name)
if type(val) != str: if not isinstance(val, str):
_metadata_properties[meta_name] = f'{val}' _metadata_properties[meta_name] = f'{val}'
validate_property_value(_metadata_properties[meta_name]) validate_property_value(_metadata_properties[meta_name])
# sparse and size are reset for all but the VOLUME type # sparse and size are reset for all but the VOLUME type
if dataset_type != DatasetType.VOLUME: if dataset_type != DatasetType.VOLUME:
if sparse: if sparse:
log.warning('Ignoring "sparse" it is only valid for volumes') log.warning('Ignoring "sparse", it is only valid for volumes')
sparse = False sparse = False
if size: if size:
log.warning('Ignoring "size", it is only valid for volumes') log.warning('Ignoring "size", it is only valid for volumes')
@ -601,8 +668,10 @@ class ZFS:
raise ValidationError('blocksize must be between 2 and 128kb (inclusive)') raise ValidationError('blocksize must be between 2 and 128kb (inclusive)')
if not ((blocksize & (blocksize - 1) == 0) and blocksize != 0): if not ((blocksize & (blocksize - 1) == 0) and blocksize != 0):
raise ValidationError('blocksize must be a power of two') raise ValidationError('blocksize must be a power of two')
return self._create_volume(name, properties, _metadata_properties, sparse, size, recursive)
return self._create_fileset(name, properties, _metadata_properties, recursive)
elif dataset_type == DatasetType.SNAPSHOT: if dataset_type == DatasetType.SNAPSHOT:
if recursive: if recursive:
log.warning('"recursive" set for snapshot or bookmark, ignored') log.warning('"recursive" set for snapshot or bookmark, ignored')
recursive = False recursive = False
@ -619,15 +688,11 @@ class ZFS:
# TODO # TODO
return self._create_dataset(name, dataset_type=dataset_type, properties=properties, return self._create_snapshot(name, properties, _metadata_properties, recursive)
metadata_properties=_metadata_properties, sparse=sparse, size=size,
recursive=recursive)
def _create_dataset( def _create_volume(
self, self,
name: str, name: str,
*,
dataset_type: DatasetType,
properties: Dict[str, str] = None, properties: Dict[str, str] = None,
metadata_properties: Dict[str, str] = None, metadata_properties: Dict[str, str] = None,
sparse: bool = False, sparse: bool = False,
@ -635,18 +700,61 @@ class ZFS:
recursive: bool = False, recursive: bool = False,
) -> Dataset: ) -> Dataset:
''' '''
Actual implementation of :func:`create_dataset`. Actual implementation of :func:`create_volume`.
:param name: The name of the new dataset. This includes the full path, e.g. ``tank/data/newdataset``. :param name: The name of the new volume. This includes the full path, e.g. ``tank/data/newvolume``.
:param dataset_type: Indicates the type of the dataset to be created. :param properties: A dict containing the properties for this new volume. These are the native properties.
:param properties: A dict containing the properties for this new dataset. These are the native properties.
:param metadata_properties: The metadata properties to set. To use a different namespace than the default (or :param metadata_properties: The metadata properties to set. To use a different namespace than the default (or
when no default is set), use the ``namespace:key`` format for the dict keys. when no default is set), use the ``namespace:key`` format for the dict keys.
:param sparse: For volumes, specifies whether a sparse (thin provisioned) or normal (thick provisioned) volume :param sparse: For volumes, specifies whether a sparse (thin provisioned) or normal (thick provisioned) volume
should be created. should be created.
:param size: For volumes, specifies the size in bytes. :param size: Size in bytes.
:param recursive: Recursively create the parent fileset. Refer to the ZFS documentation about the `-p` :param recursive: Recursively create the parent fileset. Refer to the ZFS documentation about the `-p`
parameter for ``zfs create``. This does not apply to types other than volumes or filesets. parameter for ``zfs create``.
:raises ValidationError: If validating the parameters failed.
:raises DatasetNotFound: If the dataset can't be found (snapshot, bookmark) or the parent dataset can't be
found (fileset, volume with ``recursive = False``).
'''
raise NotImplementedError(f'{self} has not implemented this function')
def _create_fileset(
self,
name: str,
properties: Dict[str, str] = None,
metadata_properties: Dict[str, str] = None,
recursive: bool = False
) -> Dataset:
'''
Actual implementation of :func:`create_fileset`.
:param name: The name of the new fileset. This includes the full path, e.g. ``tank/data/newfileset``.
:param properties: A dict containing the properties for this new fileset. These are the native properties.
:param metadata_properties: The metadata properties to set. To use a different namespace than the default (or
when no default is set), use the ``namespace:key`` format for the dict keys.
:param recursive: Recursively create the parent fileset. Refer to the ZFS documentation about the `-p`
parameter for ``zfs create``.
:raises ValidationError: If validating the parameters failed.
:raises DatasetNotFound: If the dataset can't be found (snapshot, bookmark) or the parent dataset can't be
found (fileset, volume with ``recursive = False``).
'''
raise NotImplementedError(f'{self} has not implemented this function')
def _create_snapshot(
self,
name: str,
properties: Dict[str, str] = None,
metadata_properties: Dict[str, str] = None,
recursive: bool = False
) -> Dataset:
'''
Actual implementation of :func:`create_snapshot`.
:param name: The name of the new snapshot. This includes the full path, e.g. ``tank/data/dataset@snap``.
:param properties: A dict containing the properties for this new snapshot. These are the native properties.
:param metadata_properties: The metadata properties to set. To use a different namespace than the default (or
when no default is set), use the ``namespace:key`` format for the dict keys.
:param recursive: Recursively create snapshots of the descendant datasets. Refer to the ZFS documentation about the `-r`
parameter for ``zfs snapshot``.
:raises ValidationError: If validating the parameters failed. :raises ValidationError: If validating the parameters failed.
:raises DatasetNotFound: If the dataset can't be found (snapshot, bookmark) or the parent dataset can't be :raises DatasetNotFound: If the dataset can't be found (snapshot, bookmark) or the parent dataset can't be
found (fileset, volume with ``recursive = False``). found (fileset, volume with ``recursive = False``).
@ -695,13 +803,39 @@ class ZFS:
''' '''
raise NotImplementedError(f'{self} has not implemented this function') raise NotImplementedError(f'{self} has not implemented this function')
# def mount_fileset(self, fileset: str) -> None:
# '''
# Mounts the fileset.
# :param fileset: The name of the fileset to mount.
# :raise ValidationError: If dataset pointed to by ``fileset`` is not a fileset or has no ``mountpoint`` property
# :raise DatasetNotFound: If the fileset can't be found.
# '''
# self._mount_umount_fileset(fileset, mount=True)
# def umount_fileset(self, fileset: str) -> None:
# '''
# Umounts the fileset.
# :param fileset: The name of the fileset to umount.
# :raise ValidationError: If the dataset pointed to by ``fileset`` is not a fileset.
# :raise DatasetNotFound: If the fileset can't be found.
# '''
# self._mount_umount_fileset(fileset, mount=False)
# def _mount_umount_fileset(self, fileset: str, mount: bool) -> None:
# '''
# Internal implementation of :func:`~simplezfs.zfs.mount_fileset` and :func:`~simplezfs.zfs.umount_fileset`.
# '''
# raise NotImplementedError(f'{self} has not implemented this function')
def _execute_pe_helper(self, action: str, name: str, mountpoint: Optional[str] = None): def _execute_pe_helper(self, action: str, name: str, mountpoint: Optional[str] = None):
''' '''
Runs the specified action through the PE helper. Runs the specified action through the PE helper.
:param action: The action to perform. Valid are: "create", "destroy", "set_mountpoint". :param action: The action to perform. Valid are: "create", "destroy", "set_mountpoint", "mount", "umount".
:param name: The name of the dataset to operate on. :param name: The name of the dataset to operate on.
:param mountpoint: The mountpoint for create/set_mountpoint actions. :param mountpoint: The mountpoint for create/set_mountpoint actions, ignored otherwise.
:raises ValidationError: If the parameters are invalid. :raises ValidationError: If the parameters are invalid.
:raises PEHelperException: If the PE helper reported an error. :raises PEHelperException: If the PE helper reported an error.
''' '''
@ -719,10 +853,12 @@ class ZFS:
elif action == 'destroy': elif action == 'destroy':
cmd = [self._pe_helper, 'destroy', name] cmd = [self._pe_helper, 'destroy', name]
elif action == 'set_mountpoint': elif action == 'set_mountpoint':
if mountpoint is None: if mountpoint is None or mountpoint == '':
raise ValidationError(f'Mountpoint has to be set for action "{action}"') raise ValidationError(f'Mountpoint has to be set for action "{action}"')
# TODO validate filesystem path # TODO validate filesystem path
cmd = [self._pe_helper, 'set_mountpoint', name, mountpoint] cmd = [self._pe_helper, 'set_mountpoint', name, mountpoint]
elif action in ('mount', 'umount'):
cmd = [self._pe_helper, action, name]
else: else:
raise ValidationError('Invalid action') raise ValidationError('Invalid action')
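As context for the dispatch above, a hypothetical sketch of a PE helper that honours the command-line contract ``<helper> <action> <name> [<mountpoint>]``. This is not the helper shipped with the project; the exact ``zfs`` invocations are assumptions:

```python
#!/usr/bin/env python3
'''Hypothetical PE helper sketch for the <helper> <action> <name> [<mountpoint>] contract.'''
import subprocess
import sys


def build_command(action, name, mountpoint=None):
    # Map the helper actions onto plain zfs commands (assumed to run with root rights).
    if action == 'create':
        # Assumption: "create" means creating the fileset with its mountpoint set.
        return ['zfs', 'create', '-o', f'mountpoint={mountpoint}', name]
    if action == 'destroy':
        return ['zfs', 'destroy', name]
    if action == 'set_mountpoint':
        return ['zfs', 'set', f'mountpoint={mountpoint}', name]
    if action in ('mount', 'umount'):
        return ['zfs', action, name]
    raise ValueError(f'unknown action {action!r}')


def main() -> int:
    if len(sys.argv) < 3:
        print('usage: pe_helper <action> <name> [<mountpoint>]', file=sys.stderr)
        return 2
    action, name = sys.argv[1], sys.argv[2]
    mountpoint = sys.argv[3] if len(sys.argv) > 3 else None
    try:
        cmd = build_command(action, name, mountpoint)
    except ValueError as exc:
        print(exc, file=sys.stderr)
        return 2
    return subprocess.run(cmd, check=False).returncode


if __name__ == '__main__':
    sys.exit(main())
```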

@ -3,15 +3,15 @@
CLI-based implementation. CLI-based implementation.
''' '''
from typing import Dict, List, Optional, Union
import logging import logging
import os import os
import shutil import shutil
import subprocess import subprocess
from typing import Dict, List, NoReturn, Optional, Union
from .exceptions import DatasetNotFound, PropertyNotFound, ValidationError from .exceptions import DatasetNotFound, PropertyNotFound
from .pe_helper import PEHelperBase from .pe_helper import PEHelperBase
from .types import Dataset, DatasetType, Property, PropertySource from .types import Dataset, PEHelperMode, Property, PropertySource
from .validation import ( from .validation import (
validate_dataset_path, validate_dataset_path,
validate_pool_name, validate_pool_name,
@ -36,9 +36,9 @@ class ZFSCli(ZFS):
self.find_executable(path=zfs_exe) self.find_executable(path=zfs_exe)
def __repr__(self) -> str: def __repr__(self) -> str:
return f'<ZFSCli(exe="{self.__exe}", pe_helper="{self._pe_helper}", use_pe_helper="{self._use_pe_helper}")>' return f'<ZFSCli(exe="{self.__exe}", pe_helper="{self._pe_helper}", pe_helper_mode="{self._pe_helper_mode}")>'
def find_executable(self, path: str = None): def find_executable(self, path: str = None) -> None:
''' '''
Tries to find the executable ``zfs(8)``. If ``path`` points to an executable, it is used instead of relying on Tries to find the executable ``zfs(8)``. If ``path`` points to an executable, it is used instead of relying on
the PATH to find it. It does not fall back to searching in $PATH if ``path`` does not point to an executable. the PATH to find it. It does not fall back to searching in $PATH if ``path`` does not point to an executable.
@ -59,7 +59,7 @@ class ZFSCli(ZFS):
@property @property
def executable(self) -> str: def executable(self) -> str:
''' '''
Returns the zfs executable that was found by find_executable Returns the zfs executable that was found by find_executable.
''' '''
return self.__exe return self.__exe
@ -122,7 +122,7 @@ class ZFSCli(ZFS):
res.append(Dataset.from_string(name.strip())) res.append(Dataset.from_string(name.strip()))
return res return res
def handle_command_error(self, proc: subprocess.CompletedProcess, dataset: str = None) -> None: def handle_command_error(self, proc: subprocess.CompletedProcess, dataset: str = None) -> NoReturn:
''' '''
Handles errors that occurred while running a command. Handles errors that occurred while running a command.
@ -155,10 +155,10 @@ class ZFSCli(ZFS):
:raises DatasetNotFound: If the dataset does not exist. :raises DatasetNotFound: If the dataset does not exist.
''' '''
args = [self.__exe, 'set', f'{key}={value}', dataset] args = [self.__exe, 'set', f'{key}={value}', dataset]
log.debug(f'_set_property: about to run command: {args}') log.debug('_set_property: about to run command: %s', args)
proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8') proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
if proc.returncode != 0 or len(proc.stderr) > 0: if proc.returncode != 0 or len(proc.stderr) > 0:
log.debug(f'_set_propery: command failed, code={proc.returncode}, stderr="{proc.stderr}"') log.debug('_set_property: command failed, code=%d, stderr="%s"', proc.returncode, proc.stderr.strip())
self.handle_command_error(proc, dataset=dataset) self.handle_command_error(proc, dataset=dataset)
def _get_property(self, dataset: str, key: str, is_metadata: bool) -> Property: def _get_property(self, dataset: str, key: str, is_metadata: bool) -> Property:
@ -169,10 +169,10 @@ class ZFSCli(ZFS):
:raises PropertyNotFound: If the property does not exist or is invalid (for native ones). :raises PropertyNotFound: If the property does not exist or is invalid (for native ones).
''' '''
args = [self.__exe, 'get', '-H', '-p', key, dataset] args = [self.__exe, 'get', '-H', '-p', key, dataset]
log.debug(f'_get_property: about to run command: {args}') log.debug('_get_property: about to run command: %s', args)
proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8') proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
if proc.returncode != 0 or len(proc.stderr) > 0: if proc.returncode != 0 or len(proc.stderr) > 0:
log.debug(f'_get_property: command failed, code={proc.returncode}, stderr="{proc.stderr.strip()}"') log.debug('_get_property: command failed, code=%d, stderr="%s"', proc.returncode, proc.stderr.strip())
self.handle_command_error(proc, dataset=dataset) self.handle_command_error(proc, dataset=dataset)
name, prop_name, prop_value, prop_source = proc.stdout.strip().split('\t') name, prop_name, prop_value, prop_source = proc.stdout.strip().split('\t')
if name != dataset: if name != dataset:
@ -196,10 +196,10 @@ class ZFSCli(ZFS):
:raises DatasetNotFound: If the dataset does not exist. :raises DatasetNotFound: If the dataset does not exist.
''' '''
args = [self.__exe, 'get', '-H', '-p', 'all', dataset] args = [self.__exe, 'get', '-H', '-p', 'all', dataset]
log.debug(f'_get_properties: about to run command: {args}') log.debug('_get_properties: about to run command: %s', args)
proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8') proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
if proc.returncode != 0 or len(proc.stderr) > 0: if proc.returncode != 0 or len(proc.stderr) > 0:
log.debug(f'_get_properties: command faild, code={proc.returncode}, stderr="{proc.stderr}"') log.debug('_get_properties: command failed, code=%d, stderr="%s"', proc.returncode, proc.stderr.strip())
self.handle_command_error(proc, dataset=dataset) self.handle_command_error(proc, dataset=dataset)
res = list() res = list()
for line in proc.stdout.split('\n'): for line in proc.stdout.split('\n'):
@ -216,120 +216,124 @@ class ZFSCli(ZFS):
res.append(Property(key=prop_name, value=prop_value, source=property_source, namespace=None)) res.append(Property(key=prop_name, value=prop_value, source=property_source, namespace=None))
return res return res
def _create_dataset( def _create_fileset(self, name: str, properties: Dict[str, str] = None, metadata_properties: Dict[str, str] = None,
self, recursive: bool = False) -> Dataset:
name: str,
*,
dataset_type: DatasetType,
properties: Dict[str, str] = None,
metadata_properties: Dict[str, str] = None,
sparse: bool = False,
size: Optional[int] = None,
recursive: bool = False,
) -> Dataset:
if dataset_type == DatasetType.BOOKMARK:
raise ValidationError('Bookmarks can\'t be created by this function')
# assemble the options list for properties
prop_args: List[str] = [] prop_args: List[str] = []
if properties: if properties:
for nk, nv in properties.items(): for normalkey, normalvalue in properties.items():
prop_args += ['-o', f'{nk}={nv}'] prop_args += ['-o', f'{normalkey}={normalvalue}']
if metadata_properties: if metadata_properties:
for mk, mv in metadata_properties.items(): for metakey, metavalue in metadata_properties.items():
prop_args += ['-o', f'{mk}={mv}'] prop_args += ['-o', f'{metakey}={metavalue}']
if dataset_type == DatasetType.FILESET:
assert size is None, 'Filesets have no size'
assert sparse is False, 'Filesets cannot be sparse'
# try on our own first, then depending on settings use the pe helper
args = [self.__exe, 'create']
if recursive:
args += ['-p']
args += prop_args
args += [name]
log.debug(f'executing: {args}')
print(args)
proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
if proc.returncode != 0 or len(proc.stderr) > 0:
log.debug(f'Process died with returncode {proc.returncode} and stderr: "{proc.stderr.strip()}"')
# check if we tried something only root can do
if 'filesystem successfully created, but it may only be mounted by root' in proc.stderr:
log.debug('Command output indicates that we need to run the PE Helper')
if self.use_pe_helper:
# The mountpoint property may be set, in which case we can run the PE helper. If it is not
# set, we'd need to compute it based on the parent, but for now we simply error out.
if properties and 'mountpoint' in properties:
mp = properties['mountpoint']
if self.pe_helper is not None:
test_prop = self.get_property(dataset=name, key='mountpoint', metadata=False)
if test_prop.value == mp:
log.info(f'Fileset {name} was created with mountpoint set')
else:
log.info(f'Fileset {name} was created, using pe_helper to set the mountpoint')
self.pe_helper.zfs_set_mountpoint(name, mp)
test_prop = self.get_property(dataset=name, key='mounted', metadata=False)
if test_prop.value == 'yes':
log.info(f'Fileset {name} is mounted')
else:
log.info(f'Using pe_helper to mount fileset {name}')
self.pe_helper.zfs_mount(name)
log.info(f'Fileset {name} created successfully (using pe_helper)')
return self.get_dataset_info(name)
msg = 'Fileset created partially but no PE helper set'
log.error(msg)
raise PermissionError(msg)
else:
msg = 'Mountpoint property not set, can\'t run pe_helper'
log.error(msg)
raise PermissionError(msg)
else: args = [self.__exe, 'create']
log.error(f'Fileset "{name}" was created, but could not be mounted due to lack of permissions.' if recursive:
' Please set a PE helper and call "set_mountpoint" with an explicit mountpoint to' args += ['-p']
' complete the action')
raise PermissionError(proc.stderr) args += prop_args
else: args += [name]
try:
self.handle_command_error(proc) log.debug('Executing: %s', args)
except PermissionError: proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
log.error('Permission denied, please use "zfs allow"') if proc.returncode != 0 or len(proc.stderr) > 0: # pylint: disable=too-many-nested-blocks
raise # check if we tried something only root can do
else: if 'filesystem successfully created, but it may only be mounted by root' in proc.stderr:
log.info('Filesystem created successfully') log.debug('Command output indicates that we need to run the PE Helper')
return self.get_dataset_info(name) if self.pe_helper_mode != PEHelperMode.DO_NOT_USE:
# The mountpoint property may be set, in which case we can run the PE helper. if it is not set,
# we'd need to compute it based on the parent, but for now we simply error out.
if properties and 'mountpoint' in properties:
mopo = properties['mountpoint']
if self.pe_helper is not None:
test_prop = self.get_property(dataset=name, key='mountpoint', metadata=False)
if test_prop.value == mopo:
log.info('Fileset "%s" was created with mountpoint set', name)
else:
log.info('Fileset "%s" was created, using pe_helper to set the mountpoint', name)
self.pe_helper.zfs_set_mountpoint(name, mopo)
test_prop = self.get_property(dataset=name, key='mounted', metadata=False)
if test_prop.value == 'yes':
log.info('Fileset "%s" is mounted', name) # shouldn't be the case with the error above
else:
log.info('Using pe_helper to mount fileset "%s"', name)
self.pe_helper.zfs_mount(name)
log.info('Fileset "%s" created successfully (using pe_helper)', name)
return self.get_dataset_info(name)
msg = 'Fileset created partially but no PE helper set'
log.error(msg)
raise PermissionError(msg)
msg = 'Mountpoint property not set, can\'t run pe_helper'
log.error(msg)
raise PermissionError(msg)
log.error('Fileset "%s" was created, but could not be mounted due to lack of permissions. Please set a'
' PE helper and set the mode accordingly, and call "set_mountpoint" with an explicit '
'mountpoint to complete the action', name)
raise PermissionError(proc.stderr)
try:
self.handle_command_error(proc)
except PermissionError:
log.error('Permission denied, please use "zfs allow" and possibly set a PE Helper')
raise
log.info('Filesystem "%s" created successfully', name)
return self.get_dataset_info(name)
def _create_snapshot(self, name: str, properties: Dict[str, str] = None,
metadata_properties: Dict[str, str] = None, recursive: bool = False) -> Dataset:
prop_args: List[str] = []
if properties:
for normalkey, normalvalue in properties.items():
prop_args += ['-o', f'{normalkey}={normalvalue}']
if metadata_properties:
for metakey, metavalue in metadata_properties.items():
prop_args += ['-o', f'{metakey}={metavalue}']
args = [self.__exe, 'snapshot']
if recursive:
args += ['-r']
elif dataset_type == DatasetType.VOLUME: args += prop_args + [name]
assert size is not None
args = [self.__exe, 'create'] log.debug('Executing %s', args)
if sparse: proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
args += ['-s'] if proc.returncode != 0 or len(proc.stderr) > 0:
if recursive: # TODO
args += ['-p'] self.handle_command_error(proc)
# [-b blocksize] is set using properties return self.get_dataset_info(name)
args += prop_args def _create_volume(self, name: str, properties: Dict[str, str] = None, metadata_properties: Dict[str, str] = None,
sparse: bool = False, size: Optional[int] = None, recursive: bool = False) -> Dataset:
prop_args: List[str] = []
if properties:
for normalkey, normalvalue in properties.items():
prop_args += ['-o', f'{normalkey}={normalvalue}']
if metadata_properties:
for metakey, metavalue in metadata_properties.items():
prop_args += ['-o', f'{metakey}={metavalue}']
args += ['-V', str(size), name] assert size is not None
print(f'Executing {args}') args = [self.__exe, 'create']
if sparse:
args += ['-s']
if recursive:
args += ['-p']
# [-b blocksize] is set using properties
elif dataset_type == DatasetType.SNAPSHOT: args += prop_args
assert size is None, 'Snapshots have no size'
assert sparse is False, 'Snapshots can\'t be sparse'
args = [self.__exe, 'snapshot', *prop_args, name] args += ['-V', str(size), name]
print(f'Executing {args}')
raise NotImplementedError() log.debug('Executing %s', args)
proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
if proc.returncode != 0 or len(proc.stderr) > 0:
# TODO
self.handle_command_error(proc)
return self.get_dataset_info(name)
def create_bookmark(self, snapshot: str, name: str) -> Dataset: def _create_bookmark(self, snapshot: str, name: str) -> Dataset:
validate_dataset_path(snapshot) validate_dataset_path(snapshot)
raise NotImplementedError() raise NotImplementedError()
@ -342,35 +346,54 @@ class ZFSCli(ZFS):
args.append(dataset) args.append(dataset)
log.debug(f'executing: {args}') log.debug(f'executing: {args}')
if self.pe_helper is not None and self.pe_helper_mode == PEHelperMode.USE_PROACTIVE:
test_prop = self.get_property(dataset, 'mounted')
if test_prop.value == 'yes':
log.info('Fileset is mounted, proactively unmounting using pe_helper')
self.pe_helper.zfs_umount(dataset)
proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8') proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
if proc.returncode != 0 or len(proc.stderr) > 0: if proc.returncode != 0 or len(proc.stderr) > 0:
log.debug(f'destroy_dataset: command failed, code={proc.returncode}, stderr="{proc.stderr}"') log.debug('destroy_dataset: command failed, code=%d, stderr="%s"', proc.returncode, proc.stderr.strip())
if 'has children' in proc.stderr: if 'has children' in proc.stderr:
if recursive: if recursive:
log.error(f'Dataset {dataset} has children and recursive was given, please report this') log.error('Dataset "%s" has children and recursive was given, please report this', dataset)
else: else:
log.warning(f'Dataset {dataset} has children and thus cannot be destroyed without recursive=True') log.warning('Dataset "%s" has children and thus cannot be destroyed without recursive=True',
dataset)
raise Exception raise Exception
# two possible messaes: (zfs destroy -p -r [-f] $fileset_with_snapshots) # three possible messages: (zfs destroy -p -r [-f] $fileset_with_snapshots)
# * 'cannot destroy snapshots: permission denied' # * 'cannot destroy snapshots: permission denied'
# * 'umount: only root can use "--types" option' # * 'umount: only root can use "--types" option'
# The latter seems to originate from having `destroy` and `mount` via `zfs allow`. # The latter seems to originate from having `destroy` and `mount` via `zfs allow`.
elif ('cannot destroy' in proc.stderr and 'permission denied' in proc.stderr) or \ elif ('cannot destroy' in proc.stderr and 'permission denied' in proc.stderr) or \
'only root can' in proc.stderr: 'only root can' in proc.stderr:
log.debug('Command output indicates that we need to run the PE Helper') log.debug('Command output indicates that we need to run the PE Helper')
if self.use_pe_helper: if self.pe_helper_mode != PEHelperMode.DO_NOT_USE:
if self.pe_helper is not None: if self.pe_helper is not None:
log.info(f'Using pe_helper to remove {dataset}') log.info('Using pe_helper to remove %s', dataset)
self.pe_helper.zfs_destroy_dataset(dataset, recursive, force_umount) self.pe_helper.zfs_destroy_dataset(dataset, recursive, force_umount)
log.info(f'Dataset {dataset} destroyed (using pe_helper)') log.info('Dataset "%s" destroyed (using pe_helper)', dataset)
else: else:
msg = 'Cannot destroy: No pe_helper set' msg = 'Cannot destroy: No pe_helper set'
log.error(msg) log.error(msg)
raise PermissionError(msg) raise PermissionError(msg)
else: else:
log.error(f'Dataset "{dataset}" can\'t be destroyed due to lack of permissions. Please set a' log.error('Dataset "%s" can\'t be destroyed due to lack of permissions. Please set a PE helper',
' PE helper') dataset)
raise PermissionError(proc.stderr) raise PermissionError(proc.stderr)
# Another one new with OpenZFS 2.0 that does not indicate what the problem is
# * 'cannot unmount '${fileset}': unmount failed'
elif 'cannot umount' in proc.stderr and 'umount failed' in proc.stderr:
if self.pe_helper is not None and self.pe_helper_mode != PEHelperMode.DO_NOT_USE:
log.info('Destroy could not unmount, retrying using pe_helper')
self.pe_helper.zfs_umount(dataset)
self._destroy_dataset(dataset, recursive=recursive, force_umount=force_umount)
else:
msg = 'Umounting failed and pe_helper is not allowed'
log.error(msg)
raise PermissionError(msg)
else: else:
try: try:
self.handle_command_error(proc) self.handle_command_error(proc)
@ -379,3 +402,25 @@ class ZFSCli(ZFS):
raise raise
else: else:
log.info('Dataset destroyed successfully') log.info('Dataset destroyed successfully')
# def _mount_umount_fileset(self, fileset: str, mount: bool) -> None:
# if '/' in fileset:
# validate_dataset_path(fileset)
# else:
# validate_pool_name(fileset)
# if not self.dataset_exists(fileset):
# raise DatasetNotFound('The fileset could not be found')
# test_prop = self.get_property(dataset=fileset, key='mounted')
# if mount:
# if test_prop.value == 'yes':
# log.warning('Fileset "%s" is already mounted', fileset)
# else:
# pass
# else:
# if test_prop.value != 'yes':
# log.warning('Fileset "%s" is not mounted', fileset)
# else:
# pass
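To round off, a hedged usage sketch of the destroy path this commit targets. The fileset name is a placeholder, `SudoPEHelper`'s constructor arguments and `destroy_dataset`'s exact signature are assumptions (the latter is not shown in this diff), and a `zfs allow` delegation plus a matching sudo rule are required for this to succeed:

```python
from simplezfs.pe_helper import SudoPEHelper
from simplezfs.types import PEHelperMode
from simplezfs.zfs_cli import ZFSCli

zfs = ZFSCli(pe_helper=SudoPEHelper(), pe_helper_mode=PEHelperMode.USE_PROACTIVE)

# With USE_PROACTIVE, a mounted fileset is unmounted through the PE helper before
# "zfs destroy" runs, so the unhelpful OpenZFS 2.0 "unmount failed" error does not
# come up in the first place. 'tank/scratch' is a placeholder fileset.
zfs.destroy_dataset('tank/scratch', recursive=True)
```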
