diff --git a/blivet/autopart.py b/blivet/autopart.py index 613b23147..527885047 100644 --- a/blivet/autopart.py +++ b/blivet/autopart.py @@ -27,6 +27,7 @@ from .size import Size from .devices.partition import PartitionDevice, FALLBACK_DEFAULT_PART_SIZE from .devices.luks import LUKSDevice +from .devices.lvm import ThPoolReserveSpec from .errors import NoDisksError, NotEnoughFreeSpaceError from .formats import get_format from .partitioning import do_partitioning, get_free_regions, grow_lvm @@ -41,6 +42,8 @@ # maximum ratio of swap size to disk size (10 %) MAX_SWAP_DISK_RATIO = Decimal('0.1') +AUTOPART_THPOOL_RESERVE = ThPoolReserveSpec(20, Size("1 GiB"), Size("100 GiB")) + def swap_suggestion(quiet=False, hibernation=False, disk_space=None): """ @@ -387,6 +390,9 @@ def _schedule_volumes(storage, devs): pool = storage.new_lv(parents=[container], thin_pool=True, grow=True) storage.create_device(pool) + # make sure VG reserves space for the pool to grow if needed + container.thpool_reserve = AUTOPART_THPOOL_RESERVE + if not btr and not lv and not thinlv: continue diff --git a/blivet/devices/lvm.py b/blivet/devices/lvm.py index 60cdb95bb..ac52c4aa2 100644 --- a/blivet/devices/lvm.py +++ b/blivet/devices/lvm.py @@ -67,6 +67,10 @@ def __init__(self, pv, size): """ A namedtuple class holding the information about PV's (usable) size and free space """ +ThPoolReserveSpec = namedtuple("ThPoolReserveSpec", ["percent", "min", "max"]) +""" A namedtuple class for specifying restrictions of space reserved for a thin pool to grow """ + + class NotTypeSpecific(Exception): """Exception class for invalid type-specific calls""" pass @@ -138,6 +142,7 @@ def __init__(self, name, parents=None, size=None, free=None, self.free = util.numeric_type(free) self._reserved_percent = 0 self._reserved_space = Size(0) + self._thpool_reserve = None if not self.exists: self.pv_count = len(self.parents) @@ -370,6 +375,16 @@ def is_modified(self): return modified + @property + def thpool_reserve(self): 
+ return self._thpool_reserve + + @thpool_reserve.setter + def thpool_reserve(self, value): + if value is not None and not isinstance(value, ThPoolReserveSpec): + raise ValueError("Invalid thpool_reserve given, must be of type ThPoolReserveSpec") + self._thpool_reserve = value + @property def reserved_space(self): """ Reserved space in this VG """ @@ -378,6 +393,10 @@ def reserved_space(self): reserved = self._reserved_percent * Decimal('0.01') * self.size elif self._reserved_space > Size(0): reserved = self._reserved_space + elif self._thpool_reserve and any(lv.is_thin_pool for lv in self._lvs): + reserved = min(max(self._thpool_reserve.percent * Decimal('0.01') * self.size, + self._thpool_reserve.min), + self._thpool_reserve.max) # reserve space for the pmspare LV LVM creates behind our back reserved += self.pmspare_size @@ -623,8 +642,16 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None, # we reserve space for it self._metadata_size = self.vg.pe_size self._size -= self._metadata_size + elif self.seg_type == "thin-pool": + # LVMThinPoolMixin sets self._metadata_size on its own + if not self.exists and not from_lvs and not grow: + # a thin pool we are not going to grow -> let's calculate metadata + # size now if not given explicitly + # pylint: disable=no-member + self.autoset_md_size() else: self._metadata_size = Size(0) + + self._internal_lvs = [] self._from_lvs = from_lvs @@ -1485,19 +1512,30 @@ def lvs(self): """ A list of this pool's LVs """ return self._lvs[:] # we don't want folks changing our list - @property - def vg_space_used(self): - # TODO: what about cached thin pools? + @util.requires_property("is_thin_pool") + def autoset_md_size(self): + """ If self._metadata_size not set already, it calculates the recommended value + and sets it while subtracting the size from self.size.
+ + """ - space = self.data_vg_space_used + self.metadata_vg_space_used + if self._metadata_size != 0: + return # Metadata size already set - # We need to make the nonexisting thin pools pretend to be bigger so - # that their padding is not eaten by some other LV, but we need to - # provide consistent information with the LVM tools for existing - # thin pools. - if not self.exists: - space += Size(blockdev.lvm.get_thpool_padding(space, self.vg.pe_size)) - return space + log.debug("Auto-setting thin pool metadata size") + + # we need to know chunk size to calculate recommended metadata size + if self._chunk_size == 0: + self._chunk_size = Size(blockdev.LVM_DEFAULT_CHUNK_SIZE) + log.debug("Using default chunk size: %s", self._chunk_size) + + self._metadata_size = Size(blockdev.lvm.get_thpool_meta_size(self._size, + self._chunk_size, + 100)) # snapshots + log.debug("Recommended metadata size: %s", self._metadata_size) + + log.debug("Adjusting size from %s to %s", self.size, self.size - self._metadata_size) + self.size = self.size - self._metadata_size def _pre_create(self): # make sure all the LVs this LV should be created from exist (if any) diff --git a/blivet/partitioning.py b/blivet/partitioning.py index ba83ac48f..703254ecd 100644 --- a/blivet/partitioning.py +++ b/blivet/partitioning.py @@ -27,8 +27,6 @@ import gi gi.require_version("BlockDev", "2.0") -from gi.repository import BlockDev as blockdev - import parted from .errors import DeviceError, PartitioningError, AlignmentError @@ -1957,10 +1955,6 @@ def _apply_chunk_growth(chunk): size = chunk.length_to_size(req.base + req.growth) - # reduce the size of thin pools by the pad size - if req.device.is_thin_pool: - size -= Size(blockdev.lvm.get_thpool_padding(size, req.device.vg.pe_size, included=True)) - # Base is pe, which means potentially rounded up by as much as # pesize-1. As a result, you can't just add the growth to the # initial size. 
@@ -2000,9 +1994,6 @@ def grow_lvm(storage): lv.req_size = max(lv.req_size, lv.used_space) lv.size = lv.req_size - # add the required padding to the requested pool size - lv.req_size += Size(blockdev.lvm.get_thpool_padding(lv.req_size, vg.pe_size)) - # establish sizes for the percentage-based requests (which are fixed) percentage_based_lvs = [lv for lv in vg.lvs if lv.req_percent] if sum(lv.req_percent for lv in percentage_based_lvs) > 100: @@ -2021,6 +2012,17 @@ def grow_lvm(storage): chunk.grow_requests() _apply_chunk_growth(chunk) + # now that we have grown all thin pools (if any), let's calculate and + # set their metadata size if not told otherwise + for pool in vg.thinpools: + orig_pmspare_size = vg.pmspare_size + if not pool.exists and pool.metadata_size == Size(0): + pool.autoset_md_size() + if vg.pmspare_size != orig_pmspare_size: + # pmspare size change caused by the above step, let's trade part + # of pool's space for it + pool.size -= vg.pmspare_size - orig_pmspare_size + # now, grow thin lv requests within their respective pools for pool in vg.thinpools: requests = [LVRequest(l) for l in pool.lvs]