Reserve space in a VG instead of padding thin pools on autopart
Padding thin pools brings many complications and issues. For
example, what should we do with existing thin pools? What if
somebody really wants to create a thin pool whose size leaves no
room for the padding in the VG? And more.

Nevertheless, we want to leave some space in the VG when doing
thinp autopartitioning, because that is good practice. We already
have a mechanism for this -- the VG can reserve some space for
itself (the size or percentage of which can even be defined by the
user). So let's extend and use this mechanism to make sure some
free space is left in the VG for the thin pool to grow when doing
autopartitioning. If users give us all the numbers, we should just
do as they say; they probably know what they are doing.

Related: rhbz#1131247
vpodzime committed Apr 4, 2017
1 parent f89a556 commit 6326c28
Showing 3 changed files with 66 additions and 20 deletions.
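
As a minimal sketch of the clamped-reserve arithmetic described in the message above, using the 20 % / 1 GiB / 100 GiB defaults added in autopart.py below (the standalone thpool_reserve() helper and the sample VG sizes are hypothetical; the real logic lives in LVMVolumeGroupDevice.reserved_space):

from collections import namedtuple
from decimal import Decimal

from blivet.size import Size

# Mirrors the spec added by this commit: reserve 20 % of the VG size,
# but never less than 1 GiB and never more than 100 GiB.
ThPoolReserveSpec = namedtuple("ThPoolReserveSpec", ["percent", "min", "max"])
AUTOPART_THPOOL_RESERVE = ThPoolReserveSpec(20, Size("1 GiB"), Size("100 GiB"))

def thpool_reserve(vg_size, spec=AUTOPART_THPOOL_RESERVE):
    """Hypothetical standalone helper showing the clamp used by reserved_space."""
    return min(max(spec.percent * Decimal("0.01") * vg_size, spec.min), spec.max)

print(thpool_reserve(Size("3 GiB")))   # 20 % is ~614 MiB -> clamped up to 1 GiB
print(thpool_reserve(Size("50 GiB")))  # 20 % -> 10 GiB, within the bounds
print(thpool_reserve(Size("2 TiB")))   # 20 % is ~410 GiB -> clamped down to 100 GiB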
6 changes: 6 additions & 0 deletions blivet/autopart.py
@@ -27,6 +27,7 @@
from .size import Size
from .devices.partition import PartitionDevice, FALLBACK_DEFAULT_PART_SIZE
from .devices.luks import LUKSDevice
from .devices.lvm import ThPoolReserveSpec
from .errors import NoDisksError, NotEnoughFreeSpaceError
from .formats import get_format
from .partitioning import do_partitioning, get_free_regions, grow_lvm
@@ -41,6 +42,8 @@
# maximum ratio of swap size to disk size (10 %)
MAX_SWAP_DISK_RATIO = Decimal('0.1')

AUTOPART_THPOOL_RESERVE = ThPoolReserveSpec(20, Size("1 GiB"), Size("100 GiB"))


def swap_suggestion(quiet=False, hibernation=False, disk_space=None):
"""
@@ -387,6 +390,9 @@ def _schedule_volumes(storage, devs):
pool = storage.new_lv(parents=[container], thin_pool=True, grow=True)
storage.create_device(pool)

# make sure VG reserves space for the pool to grow if needed
container.thpool_reserve = AUTOPART_THPOOL_RESERVE

if not btr and not lv and not thinlv:
continue

60 changes: 49 additions & 11 deletions blivet/devices/lvm.py
@@ -67,6 +67,10 @@ def __init__(self, pv, size):
""" A namedtuple class holding the information about PV's (usable) size and free space """


ThPoolReserveSpec = namedtuple("ThPoolReserveSpec", ["percent", "min", "max"])
""" A namedtuple class for specifying restrictions of space reserved for a thin pool to grow """


class NotTypeSpecific(Exception):
"""Exception class for invalid type-specific calls"""
pass
@@ -138,6 +142,7 @@ def __init__(self, name, parents=None, size=None, free=None,
self.free = util.numeric_type(free)
self._reserved_percent = 0
self._reserved_space = Size(0)
self._thpool_reserve = None

if not self.exists:
self.pv_count = len(self.parents)
@@ -370,6 +375,16 @@ def is_modified(self):

return modified

@property
def thpool_reserve(self):
return self._thpool_reserve

@thpool_reserve.setter
def thpool_reserve(self, value):
if value is not None and not isinstance(value, ThPoolReserveSpec):
raise ValueError("Invalid thpool_reserve given, must be of type ThPoolReserveSpec")
self._thpool_reserve = value

@property
def reserved_space(self):
""" Reserved space in this VG """
@@ -378,6 +393,10 @@ def reserved_space(self):
reserved = self._reserved_percent * Decimal('0.01') * self.size
elif self._reserved_space > Size(0):
reserved = self._reserved_space
elif self._thpool_reserve and any(lv.is_thin_pool for lv in self._lvs):
reserved = min(max(self._thpool_reserve.percent * Decimal(0.01) * self.size,
self._thpool_reserve.min),
self._thpool_reserve.max)

# reserve space for the pmspare LV LVM creates behind our back
reserved += self.pmspare_size
@@ -623,8 +642,16 @@ def __init__(self, name, parents=None, size=None, uuid=None, seg_type=None,
# we reserve space for it
self._metadata_size = self.vg.pe_size
self._size -= self._metadata_size
elif self.seg_type == "thin-pool":
# LVMThinPoolMixin sets self._metadata_size on its own
if not self.exists and not from_lvs and not grow:
# a thin pool we are not going to grow -> let's calculate metadata
# size now if not given explicitly
# pylint: disable=no-member
self.autoset_md_size()
else:
self._metadata_size = Size(0)

self._internal_lvs = []

self._from_lvs = from_lvs
@@ -1485,19 +1512,30 @@ def lvs(self):
""" A list of this pool's LVs """
return self._lvs[:] # we don't want folks changing our list

@property
def vg_space_used(self):
# TODO: what about cached thin pools?
@util.requires_property("is_thin_pool")
def autoset_md_size(self):
""" If self._metadata_size not set already, it calculates the recommended value
and sets it while subtracting the size from self.size.
"""

space = self.data_vg_space_used + self.metadata_vg_space_used
if self._metadata_size != 0:
return # Metadata size already set

# We need to make the nonexisting thin pools pretend to be bigger so
# that their padding is not eaten by some other LV, but we need to
# provide consistent information with the LVM tools for existing
# thin pools.
if not self.exists:
space += Size(blockdev.lvm.get_thpool_padding(space, self.vg.pe_size))
return space
log.debug("Auto-setting thin pool metadata size")

# we need to know chunk size to calculate recommended metadata size
if self._chunk_size == 0:
self._chunk_size = Size(blockdev.LVM_DEFAULT_CHUNK_SIZE)
log.debug("Using default chunk size: %s", self._chunk_size)

self._metadata_size = Size(blockdev.lvm.get_thpool_meta_size(self._size,
self._chunk_size,
100)) # snapshots
log.debug("Recommended metadata size: %s", self._metadata_size)

log.debug("Adjusting size from %s to %s", self.size, self.size - self._metadata_size)
self.size = self.size - self._metadata_size

def _pre_create(self):
# make sure all the LVs this LV should be created from exist (if any)
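
The new autoset_md_size() is the other half of the change. As a rough sketch of what it amounts to for a pool that does not exist yet (assuming libblockdev's LVM plugin is initialized; the standalone function and the 20 GiB request are made up for illustration):

import gi
gi.require_version("BlockDev", "2.0")
from gi.repository import BlockDev as blockdev

from blivet.size import Size

def sketch_autoset_md_size(requested_size, chunk_size=Size(0)):
    """Pick the default chunk size if none was given, ask libblockdev for the
    recommended metadata size (with headroom for up to 100 snapshots, as the
    commit does) and carve it out of the requested pool size."""
    if chunk_size == Size(0):
        chunk_size = Size(blockdev.LVM_DEFAULT_CHUNK_SIZE)
    md_size = Size(blockdev.lvm.get_thpool_meta_size(requested_size, chunk_size, 100))
    return requested_size - md_size, md_size

# e.g. a 20 GiB pool request is split into data space and metadata space
data_size, md_size = sketch_autoset_md_size(Size("20 GiB"))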
20 changes: 11 additions & 9 deletions blivet/partitioning.py
@@ -27,8 +27,6 @@
import gi
gi.require_version("BlockDev", "2.0")

from gi.repository import BlockDev as blockdev

import parted

from .errors import DeviceError, PartitioningError, AlignmentError
@@ -1957,10 +1955,6 @@ def _apply_chunk_growth(chunk):

size = chunk.length_to_size(req.base + req.growth)

# reduce the size of thin pools by the pad size
if req.device.is_thin_pool:
size -= Size(blockdev.lvm.get_thpool_padding(size, req.device.vg.pe_size, included=True))

# Base is pe, which means potentially rounded up by as much as
# pesize-1. As a result, you can't just add the growth to the
# initial size.
@@ -2000,9 +1994,6 @@ def grow_lvm(storage):
lv.req_size = max(lv.req_size, lv.used_space)
lv.size = lv.req_size

# add the required padding to the requested pool size
lv.req_size += Size(blockdev.lvm.get_thpool_padding(lv.req_size, vg.pe_size))

# establish sizes for the percentage-based requests (which are fixed)
percentage_based_lvs = [lv for lv in vg.lvs if lv.req_percent]
if sum(lv.req_percent for lv in percentage_based_lvs) > 100:
@@ -2021,6 +2012,17 @@
chunk.grow_requests()
_apply_chunk_growth(chunk)

# now that we have grown all thin pools (if any), let's calculate and
# set their metadata size if not told otherwise
for pool in vg.thinpools:
orig_pmspare_size = vg.pmspare_size
if not pool.exists and pool.metadata_size == Size(0):
pool.autoset_md_size()
if vg.pmspare_size != orig_pmspare_size:
# pmspare size change caused by the above step, let's trade part
# of pool's space for it
pool.size -= vg.pmspare_size - orig_pmspare_size

# now, grow thin lv requests within their respective pools
for pool in vg.thinpools:
requests = [LVRequest(l) for l in pool.lvs]
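
The pmspare adjustment in the grow_lvm() hunk above is subtle: when autoset_md_size() gives a pool a bigger metadata LV, the spare metadata LV ("pmspare") the VG keeps room for grows with it, so the pool gives that growth back. A small worked example with made-up sizes (the real values come from vg.pmspare_size):

from blivet.size import Size

# Before autoset_md_size(): say the VG needed a 4 MiB pmspare LV and the
# pool had been grown to 30 GiB.
orig_pmspare_size = Size("4 MiB")
pool_size = Size("30 GiB")

# autoset_md_size() picks, say, a 16 MiB metadata LV for the pool, so the
# pmspare LV the VG reserves space for grows to 16 MiB as well.
new_pmspare_size = Size("16 MiB")

# The pool gives up the difference, as the new grow_lvm() code does:
pool_size -= new_pmspare_size - orig_pmspare_size  # 30 GiB minus 12 MiB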
