sandbox/virtcontainers: memory resource hotplug when creating a container.
When creating a sandbox, we set up the sandbox with 2048M of base
memory and then hotplug the memory needed by each new container. We
also change the unit of c.config.Resources.Mem from MiB to bytes, so
that memory amounts in the range 4095B < memory < 1MiB are not lost
to truncation.

Fixes kata-containers#400

Signed-off-by: Clare Chen <clare.chenhui@huawei.com>
Signed-off-by: Zichang Lin <linzichang@huawei.com>
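
For context, a minimal standalone Go sketch (not part of this commit) of why the old MiB unit could drop small limits and how byte-based, page-aligned accounting keeps them; the 4 KiB page size and the helper names are assumptions for illustration only.

```go
package main

import "fmt"

// oldMemMiB mirrors the previous accounting: truncate the byte limit to MiB.
func oldMemMiB(limit int64) uint32 { return uint32(limit >> 20) }

// newMemByte mirrors the new accounting: keep bytes, aligned down to an assumed
// 4 KiB page, as cgroup memory.limit_in_bytes is page aligned when it takes effect.
func newMemByte(limit int64) int64 { return (limit >> 12) << 12 }

func main() {
	limit := int64(512 * 1024) // a 512 KiB limit, i.e. 4095B < memory < 1MiB

	fmt.Println(oldMemMiB(limit))  // prints 0: the whole limit is lost in MiB units
	fmt.Println(newMemByte(limit)) // prints 524288: the limit is preserved in bytes
}
```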
linzichang committed Oct 4, 2018
1 parent e509150 commit 44b3ef1
Showing 4 changed files with 54 additions and 108 deletions.
9 changes: 4 additions & 5 deletions virtcontainers/api.go
@@ -112,6 +112,10 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
}
}()

if err := s.getAndStoreGuestDetails(); err != nil {
return nil, err
}

// Create Containers
if err = s.createContainers(); err != nil {
return nil, err
@@ -122,11 +126,6 @@ func createSandboxFromConfig(ctx context.Context, sandboxConfig SandboxConfig, f
return nil, err
}

// get and store guest details
if err := s.getAndStoreGuestDetails(); err != nil {
return nil, err
}

return s, nil
}

78 changes: 48 additions & 30 deletions virtcontainers/container.go
@@ -178,7 +178,7 @@ type ContainerResources struct {
VCPUs uint32

// Mem is the memory that is being used by the container
MemMB uint32
MemByte int64
}

// ContainerConfig describes one container runtime configuration.
@@ -984,7 +984,9 @@ func (c *Container) update(resources specs.LinuxResources) error {

newResources := ContainerResources{
VCPUs: uint32(utils.ConstraintsToVCPUs(*resources.CPU.Quota, *resources.CPU.Period)),
MemMB: uint32(*resources.Memory.Limit >> 20),
// Page align the memory value, since cgroup memory.limit_in_bytes is aligned to the page size when it takes effect.
// TODO: use GetGuestDetails to get the guest OS page size.
MemByte: (*resources.Memory.Limit >> 12) << 12,
}

if err := c.updateResources(currentConfig.Resources, newResources); err != nil {
@@ -1181,15 +1183,14 @@ func (c *Container) detachDevices() error {
}

func (c *Container) addResources() error {
//TODO add support for memory, Issue: https://github.com/containers/virtcontainers/issues/578
if c.config == nil {
return nil
}

// Container is being created, try to add the number of vCPUs specified
vCPUs := c.config.Resources.VCPUs
if vCPUs != 0 {
virtLog.Debugf("hot adding %d vCPUs", vCPUs)
virtLog.Debugf("create container: hot adding %d vCPUs", vCPUs)
data, err := c.sandbox.hypervisor.hotplugAddDevice(vCPUs, cpuDev)
if err != nil {
return err
@@ -1210,14 +1211,32 @@ func (c *Container) addResources() error {
}
}

return c.sandbox.agent.onlineCPUMem(vcpusAdded, true)
if err := c.sandbox.agent.onlineCPUMem(vcpusAdded, true); err != nil {
return err
}
}

// Try to hot add the amount of memory specified
addMemByte := c.config.Resources.MemByte
if addMemByte != 0 {
memHotplugMB, err := c.calcHotplugMemMiBSize(addMemByte)
if err != nil {
return err
}
virtLog.Debugf("create container: hotplug %dMB mem", memHotplugMB)
_, err = c.sandbox.hypervisor.hotplugAddDevice(&memoryDevice{sizeMB: int(memHotplugMB)}, memoryDev)
if err != nil {
return err
}
if err := c.sandbox.agent.onlineCPUMem(0, false); err != nil {
return err
}
}

return nil
}

func (c *Container) removeResources() error {
//TODO add support for memory, Issue: https://github.com/containers/virtcontainers/issues/578
if c.config == nil {
return nil
}
@@ -1237,6 +1256,7 @@ func (c *Container) removeResources() error {
return err
}
}
// Hot removing memory is not supported

return nil
}
@@ -1259,7 +1279,7 @@ func (c *Container) updateVCPUResources(oldResources, newResources ContainerReso
if oldVCPUs < newVCPUs {
// hot add vCPUs
vCPUs = newVCPUs - oldVCPUs
virtLog.Debugf("hot adding %d vCPUs", vCPUs)
virtLog.Debugf("update container: hot adding %d vCPUs", vCPUs)
data, err := c.sandbox.hypervisor.hotplugAddDevice(vCPUs, cpuDev)
if err != nil {
return err
@@ -1291,45 +1311,43 @@ func (c *Container) updateVCPUResources(oldResources, newResources ContainerReso
return nil
}

func (c *Container) memHotplugValid(mem uint32) (uint32, error) {
memorySectionSizeMB := c.sandbox.state.GuestMemoryBlockSizeMB
if memorySectionSizeMB == 0 {
return mem, nil
// calcHotplugMemMiBSize calculates the hotplug memory size in MiB, aligned to the guest OS memory block size
func (c *Container) calcHotplugMemMiBSize(memByte int64) (uint32, error) {
memoryBlockSize := int64(c.sandbox.state.GuestMemoryBlockSizeMB)
if memoryBlockSize == 0 {
return uint32(memByte >> 20), nil
}

// TODO: hot adding memory aligned to the memory section size would be more appropriate. See https://github.com/kata-containers/runtime/pull/624#issuecomment-419656853
return uint32(math.Ceil(float64(mem)/float64(memorySectionSizeMB))) * memorySectionSizeMB, nil
return uint32(int64(math.Ceil(float64(memByte)/float64(memoryBlockSize<<20))) * memoryBlockSize), nil
}

func (c *Container) updateMemoryResources(oldResources, newResources ContainerResources) error {
oldMemMB := oldResources.MemMB
newMemMB := newResources.MemMB
oldMemByte := oldResources.MemByte
newMemByte := newResources.MemByte

if oldMemMB == newMemMB {
if oldMemByte == newMemByte {
c.Logger().WithFields(logrus.Fields{
"old-mem": fmt.Sprintf("%dMB", oldMemMB),
"new-mem": fmt.Sprintf("%dMB", newMemMB),
"old-mem": fmt.Sprintf("%d", oldMemByte),
"new-mem": fmt.Sprintf("%d", newMemByte),
}).Debug("the actual memory size will not be modified")
return nil
}

if oldMemMB < newMemMB {
if oldMemByte < newMemByte {
// hot add memory
addMemMB := newMemMB - oldMemMB
memHotplugMB, err := c.memHotplugValid(addMemMB)
addMemByte := newMemByte - oldMemByte
memHotplugMB, err := c.calcHotplugMemMiBSize(addMemByte)
if err != nil {
return err
}

virtLog.Debugf("hotplug %dMB mem", memHotplugMB)
addMemDevice := &memoryDevice{
sizeMB: int(memHotplugMB),
}
_, err = c.sandbox.hypervisor.hotplugAddDevice(addMemDevice, memoryDev)
virtLog.Debugf("update container: hotplug %dMB mem", memHotplugMB)
_, err = c.sandbox.hypervisor.hotplugAddDevice(&memoryDevice{sizeMB: int(memHotplugMB)}, memoryDev)
if err != nil {
return err
}
newResources.MemMB = newMemMB
newResources.MemByte = newMemByte
if err := c.sandbox.agent.onlineCPUMem(0, false); err != nil {
return err
}
@@ -1341,7 +1359,7 @@ func (c *Container) updateResources(oldResources, newResources ContainerResource
func (c *Container) updateResources(oldResources, newResources ContainerResources) error {
// initialize with oldResources
c.config.Resources.VCPUs = oldResources.VCPUs
c.config.Resources.MemMB = oldResources.MemMB
c.config.Resources.MemByte = oldResources.MemByte

// Cpu is not updated if period and/or quota not set
if newResources.VCPUs != 0 {
@@ -1357,13 +1375,13 @@ func (c *Container) updateResources(oldResources, newResources ContainerResource
}

// Memory is not updated if memory limit not set
if newResources.MemMB != 0 {
if newResources.MemByte != 0 {
if err := c.updateMemoryResources(oldResources, newResources); err != nil {
return err
}

// Set and save container's config MemMB field only
c.config.Resources.MemMB = newResources.MemMB
// Set and save the container config's MemByte field only
c.config.Resources.MemByte = newResources.MemByte
return c.storeContainer()
}

27 changes: 2 additions & 25 deletions virtcontainers/pkg/oci/utils.go
@@ -412,25 +412,6 @@ func (spec *CompatOCISpec) SandboxID() (string, error) {
return "", fmt.Errorf("Could not find sandbox ID")
}

func updateVMConfig(ocispec CompatOCISpec, config *RuntimeConfig) error {
if ocispec.Linux == nil || ocispec.Linux.Resources == nil {
return nil
}

if ocispec.Linux.Resources.Memory != nil &&
ocispec.Linux.Resources.Memory.Limit != nil {
memBytes := *ocispec.Linux.Resources.Memory.Limit
if memBytes <= 0 {
return fmt.Errorf("Invalid OCI memory limit %d", memBytes)
}
// Use some math magic to round up to the nearest Mb.
// This has the side effect that we can never have <1Mb assigned.
config.HypervisorConfig.MemorySize = uint32((memBytes + (1024*1024 - 1)) / (1024 * 1024))
}

return nil
}

func addAssetAnnotations(ocispec CompatOCISpec, config *vc.SandboxConfig) {
assetAnnotations := []string{
vcAnnotations.KernelPath,
@@ -469,11 +450,6 @@ func SandboxConfig(ocispec CompatOCISpec, runtime RuntimeConfig, bundlePath, cid
return vc.SandboxConfig{}, err
}

err = updateVMConfig(ocispec, &runtime)
if err != nil {
return vc.SandboxConfig{}, err
}

ociSpecJSON, err := json.Marshal(ocispec)
if err != nil {
return vc.SandboxConfig{}, err
@@ -570,7 +546,8 @@ func ContainerConfig(ocispec CompatOCISpec, bundlePath, cid, console string, det
}
if ocispec.Linux.Resources.Memory != nil {
if ocispec.Linux.Resources.Memory.Limit != nil {
resources.MemMB = uint32(*ocispec.Linux.Resources.Memory.Limit >> 20)
// Page align the memory value, since cgroup memory.limit_in_bytes is aligned to the page size when it takes effect
resources.MemByte = (*ocispec.Linux.Resources.Memory.Limit >> 12) << 12
}
}

48 changes: 0 additions & 48 deletions virtcontainers/pkg/oci/utils_test.go
@@ -254,54 +254,6 @@ func TestMinimalSandboxConfig(t *testing.T) {
}
}

func TestUpdateVmConfig(t *testing.T) {
var limitBytes int64 = 128 * 1024 * 1024
assert := assert.New(t)

config := RuntimeConfig{
HypervisorConfig: vc.HypervisorConfig{
MemorySize: 2048,
},
}

expectedMem := uint32(128)

ocispec := CompatOCISpec{
Spec: specs.Spec{
Linux: &specs.Linux{
Resources: &specs.LinuxResources{
Memory: &specs.LinuxMemory{
Limit: &limitBytes,
},
},
},
},
}

err := updateVMConfig(ocispec, &config)
assert.Nil(err)
assert.Equal(config.HypervisorConfig.MemorySize, expectedMem)

limitBytes = -128 * 1024 * 1024
ocispec.Linux.Resources.Memory.Limit = &limitBytes

err = updateVMConfig(ocispec, &config)
assert.NotNil(err)

// Test case when Memory is nil
ocispec.Spec.Linux.Resources.Memory = nil
err = updateVMConfig(ocispec, &config)
assert.Nil(err)

// Test case when CPU is nil
ocispec.Spec.Linux.Resources.CPU = nil
limitBytes = 20
ocispec.Linux.Resources.Memory = &specs.LinuxMemory{Limit: &limitBytes}
err = updateVMConfig(ocispec, &config)
assert.Nil(err)
assert.NotEqual(config.HypervisorConfig.MemorySize, expectedMem)
}

func testStatusToOCIStateSuccessful(t *testing.T, cStatus vc.ContainerStatus, expected specs.State) {
ociState := StatusToOCIState(cStatus)
