Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Refactor disk usage code in containerd handler #2955

Merged
merged 2 commits into from
Oct 11, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 4 additions & 2 deletions container/containerd/client_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ type containerdClientMock struct {
cntrs map[string]*containers.Container
status *criapi.ContainerStatus
stats *criapi.ContainerStats
mounts []*types.Mount
returnErr error
}

Expand Down Expand Up @@ -58,14 +59,15 @@ func (c *containerdClientMock) ContainerStats(ctx context.Context, id string) (*
}

// SnapshotMounts returns the mounts configured on the mock, letting tests
// drive the snapshot-based disk usage code path. The returnErr field is
// intentionally not consulted here; errors are simulated via other methods.
func (c *containerdClientMock) SnapshotMounts(ctx context.Context, snapshotter, key string) ([]*types.Mount, error) {
	return c.mounts, nil
}

func mockcontainerdClient(cntrs map[string]*containers.Container, status *criapi.ContainerStatus, stats *criapi.ContainerStats, returnErr error) ContainerdClient {
func mockcontainerdClient(cntrs map[string]*containers.Container, status *criapi.ContainerStatus, stats *criapi.ContainerStats, mounts []*types.Mount, returnErr error) ContainerdClient {
return &containerdClientMock{
cntrs: cntrs,
status: status,
stats: stats,
mounts: mounts,
returnErr: returnErr,
}
}
2 changes: 1 addition & 1 deletion container/containerd/factory_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ func TestCanHandleAndAccept(t *testing.T) {
testContainers["40af7cdcbe507acad47a5a62025743ad3ddc6ab93b77b21363aa1c1d641047c9"] = testContainer

f := &containerdFactory{
client: mockcontainerdClient(testContainers, nil, nil, nil),
client: mockcontainerdClient(testContainers, nil, nil, nil, nil),
cgroupSubsystems: containerlibcontainer.CgroupSubsystems{},
fsInfo: nil,
machineInfoFactory: nil,
Expand Down
124 changes: 69 additions & 55 deletions container/containerd/handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ import (
"github.com/containerd/containerd/errdefs"
criapi "github.com/google/cadvisor/cri-api/pkg/apis/runtime/v1alpha2"
"golang.org/x/net/context"
"k8s.io/klog/v2"

"github.com/google/cadvisor/container"
"github.com/google/cadvisor/container/common"
Expand Down Expand Up @@ -175,62 +176,10 @@ func newContainerdContainerHandler(
handler.image = cntr.Image

if includedMetrics.Has(container.DiskUsageMetrics) && cntr.Labels["io.cri-containerd.kind"] != "sandbox" {
mounts, err := client.SnapshotMounts(ctx, cntr.Snapshotter, cntr.SnapshotKey)
err = handler.fillDiskUsageInfo(ctx, client, machineInfoFactory, fsInfo, cntr.Snapshotter, cntr.SnapshotKey, rootfs, status.LogPath)
if err != nil {
return nil, fmt.Errorf("failed to obtain containerd snapshot mounts for disk usage metrics: %v", err)
klog.Errorf("error occured while filling disk usage info for container %s: %s", name, err)
}

// Default to top directory
snapshotDir := "/var/lib/containerd"
// TODO: only overlay snapshotters is handled as of now.
// Note: overlay returns single mount. https://github.com/containerd/containerd/blob/main/snapshots/overlay/overlay.go
if len(mounts) > 0 && mounts[0].Type == "overlay" {
for _, option := range mounts[0].Options {
// Example: upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/5001/fs
if strings.HasPrefix(option, "upperdir=") {
snapshotDir = option[len("upperdir="):]
break
}
}
}
deviceInfo, err := fsInfo.GetDirFsDevice(path.Join(rootfs, snapshotDir))
if err != nil {
return nil, err
}

mi, err := machineInfoFactory.GetMachineInfo()
if err != nil {
return nil, err
}

var (
fsLimit uint64
fsType string
fsTotalInodes uint64
)
// Containerd does not impose any filesystem limits for containers. So use capacity as limit.
for _, fs := range mi.Filesystems {
if fs.Device == deviceInfo.Device {
fsLimit = fs.Capacity
fsType = fs.Type
fsTotalInodes = fs.Inodes
break
}
}

handler.fsLimit = fsLimit
handler.fsType = fsType
handler.fsTotalInodes = fsTotalInodes
handler.device = deviceInfo.Device

handler.fsHandler = common.NewFsHandler(common.DefaultPeriod, &fsUsageProvider{
ctx: ctx,
client: client,
containerID: id,
// Path of logs, e.g. /var/log/pods/XXX
logPath: path.Join(rootfs, status.LogPath),
fsInfo: fsInfo,
})
}

for _, exposedEnv := range metadataEnvAllowList {
Expand All @@ -252,6 +201,71 @@ func newContainerdContainerHandler(
return handler, nil
}

// fillDiskUsageInfo resolves the filesystem backing the container's snapshot
// and configures the handler so DiskUsageMetrics can be served.
//
// It inspects the snapshot mounts to locate the writable layer (the overlay
// upperdir), maps that directory to its backing device, copies the device's
// capacity, type, and inode count onto the handler, and installs a periodic
// fsHandler that polls usage of the snapshot and the container's log path.
// Returns an error if the mounts, device, or machine info cannot be obtained.
func (h *containerdContainerHandler) fillDiskUsageInfo(
	ctx context.Context,
	client ContainerdClient,
	machineInfoFactory info.MachineInfoFactory,
	fsInfo fs.FsInfo,
	snapshotter, snapshotKey, rootfs, logPath string) error {
	mounts, err := client.SnapshotMounts(ctx, snapshotter, snapshotKey)
	if err != nil {
		// Wrap with %w so callers can unwrap the underlying containerd error.
		return fmt.Errorf("failed to obtain containerd snapshot mounts for disk usage metrics: %w", err)
	}

	// Default to the top-level containerd state directory.
	snapshotDir := "/var/lib/containerd"
	// TODO: only the overlay snapshotter is handled as of now.
	// Note: overlay returns a single mount. https://github.com/containerd/containerd/blob/main/snapshots/overlay/overlay.go
	if len(mounts) > 0 && mounts[0].Type == "overlay" {
		for _, option := range mounts[0].Options {
			// Example: upperdir=/var/lib/containerd/io.containerd.snapshotter.v1.overlayfs/snapshots/5001/fs
			if strings.HasPrefix(option, "upperdir=") {
				snapshotDir = strings.TrimPrefix(option, "upperdir=")
				break
			}
		}
	}
	deviceInfo, err := fsInfo.GetDirFsDevice(path.Join(rootfs, snapshotDir))
	if err != nil {
		return err
	}

	mi, err := machineInfoFactory.GetMachineInfo()
	if err != nil {
		return err
	}

	var (
		fsLimit       uint64
		fsType        string
		fsTotalInodes uint64
	)
	// Containerd does not impose any filesystem limits for containers,
	// so use the backing device's capacity as the limit.
	for _, fs := range mi.Filesystems {
		if fs.Device == deviceInfo.Device {
			fsLimit = fs.Capacity
			fsType = fs.Type
			fsTotalInodes = fs.Inodes
			break
		}
	}

	h.fsLimit = fsLimit
	h.fsType = fsType
	h.fsTotalInodes = fsTotalInodes
	h.device = deviceInfo.Device

	h.fsHandler = common.NewFsHandler(common.DefaultPeriod, &fsUsageProvider{
		ctx:         ctx,
		client:      client,
		containerID: h.reference.Id,
		// Path of logs, e.g. /var/log/pods/XXX
		logPath: path.Join(rootfs, logPath),
		fsInfo:  fsInfo,
	})
	return nil
}

// ContainerReference returns the container reference cached on the handler
// at construction time. The error result is always nil.
func (h *containerdContainerHandler) ContainerReference() (info.ContainerReference, error) {
	return h.reference, nil
}
Expand Down Expand Up @@ -289,7 +303,7 @@ func (h *containerdContainerHandler) getFsStats(stats *info.ContainerStats) erro
common.AssignDeviceNamesToDiskStats((*common.MachineInfoNamer)(mi), &stats.DiskIo)
}

if !h.includedMetrics.Has(container.DiskUsageMetrics) || h.labels["io.cri-containerd.kind"] == "sandbox" {
if !h.includedMetrics.Has(container.DiskUsageMetrics) || h.fsHandler == nil || h.labels["io.cri-containerd.kind"] == "sandbox" {
return nil
}

Expand Down
Loading