From bdffc25724267824bec76d677b8c167de7bad787 Mon Sep 17 00:00:00 2001 From: Ravi Chamarthy Date: Fri, 30 Aug 2024 20:35:12 +0000 Subject: [PATCH] feat: refactor to use machinerun.io/atomfs Signed-off-by: Ravi Chamarthy --- cmd/stacker/build.go | 2 +- cmd/stacker/inspect.go | 2 +- cmd/stacker/internal_go.go | 2 +- cmd/stacker/publish.go | 2 +- go.mod | 12 +- go.sum | 2 + pkg/atomfs/molecule.go | 266 ------------ pkg/atomfs/molecule_test.go | 25 -- pkg/atomfs/oci.go | 59 --- pkg/lib/image_test.go | 2 +- pkg/mount/mountinfo.go | 95 ----- pkg/oci/oci.go | 78 ---- pkg/overlay/metadata.go | 2 +- pkg/overlay/pack.go | 6 +- pkg/squashfs/mediatype.go | 37 -- pkg/squashfs/squashfs.go | 744 ---------------------------------- pkg/squashfs/superblock.go | 263 ------------ pkg/squashfs/verity.go | 522 ------------------------ pkg/squashfs/verity_static.go | 10 - pkg/squashfs/verity_test.go | 134 ------ pkg/types/layer_type.go | 2 +- 21 files changed, 20 insertions(+), 2247 deletions(-) delete mode 100644 pkg/atomfs/molecule.go delete mode 100644 pkg/atomfs/molecule_test.go delete mode 100644 pkg/atomfs/oci.go delete mode 100644 pkg/mount/mountinfo.go delete mode 100644 pkg/oci/oci.go delete mode 100644 pkg/squashfs/mediatype.go delete mode 100644 pkg/squashfs/squashfs.go delete mode 100644 pkg/squashfs/superblock.go delete mode 100644 pkg/squashfs/verity.go delete mode 100644 pkg/squashfs/verity_static.go delete mode 100644 pkg/squashfs/verity_test.go diff --git a/cmd/stacker/build.go b/cmd/stacker/build.go index 69763f81..a46ea050 100644 --- a/cmd/stacker/build.go +++ b/cmd/stacker/build.go @@ -4,7 +4,7 @@ import ( "fmt" cli "github.com/urfave/cli/v2" - "stackerbuild.io/stacker/pkg/squashfs" + "machinerun.io/atomfs/squashfs" "stackerbuild.io/stacker/pkg/stacker" "stackerbuild.io/stacker/pkg/types" ) diff --git a/cmd/stacker/inspect.go b/cmd/stacker/inspect.go index 92fea20b..43aeaee9 100644 --- a/cmd/stacker/inspect.go +++ b/cmd/stacker/inspect.go @@ -11,7 +11,7 @@ import ( "github.com/opencontainers/umoci/oci/casext" "github.com/pkg/errors" cli "github.com/urfave/cli/v2" - stackeroci "stackerbuild.io/stacker/pkg/oci" + stackeroci "machinerun.io/atomfs/oci" ) var inspectCmd = cli.Command{ diff --git a/cmd/stacker/internal_go.go b/cmd/stacker/internal_go.go index d7abeab4..2e8fd992 100644 --- a/cmd/stacker/internal_go.go +++ b/cmd/stacker/internal_go.go @@ -10,7 +10,7 @@ import ( "github.com/pkg/errors" cli "github.com/urfave/cli/v2" "golang.org/x/sys/unix" - "stackerbuild.io/stacker/pkg/atomfs" + "machinerun.io/atomfs" "stackerbuild.io/stacker/pkg/lib" "stackerbuild.io/stacker/pkg/log" "stackerbuild.io/stacker/pkg/overlay" diff --git a/cmd/stacker/publish.go b/cmd/stacker/publish.go index 30c9acaa..3eff0bb3 100644 --- a/cmd/stacker/publish.go +++ b/cmd/stacker/publish.go @@ -3,8 +3,8 @@ package main import ( "github.com/pkg/errors" cli "github.com/urfave/cli/v2" + "machinerun.io/atomfs/squashfs" "stackerbuild.io/stacker/pkg/lib" - "stackerbuild.io/stacker/pkg/squashfs" "stackerbuild.io/stacker/pkg/stacker" "stackerbuild.io/stacker/pkg/types" ) diff --git a/go.mod b/go.mod index 2db64f74..7523a5f6 100644 --- a/go.mod +++ b/go.mod @@ -5,25 +5,22 @@ go 1.21.0 toolchain go1.21.6 require ( - github.com/Masterminds/semver/v3 v3.2.1 github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be github.com/apex/log v1.9.0 github.com/apparentlymart/go-shquot v0.0.1 github.com/cheggaaa/pb/v3 v3.1.2 github.com/containers/image/v5 v5.24.2 github.com/dustin/go-humanize v1.0.1 - github.com/freddierice/go-losetup 
v0.0.0-20220711213114-2a14873012db github.com/justincormack/go-memfd v0.0.0-20170219213707-6e4af0518993 github.com/klauspost/pgzip v1.2.6 github.com/lxc/go-lxc v0.0.0-20230926171149-ccae595aa49e github.com/lxc/incus v0.3.1-0.20231215145534-1719ffcbab9d - github.com/martinjungblut/go-cryptsetup v0.0.0-20220520180014-fd0874fd07a6 github.com/minio/sha256-simd v1.0.1 github.com/mitchellh/hashstructure v1.1.0 github.com/moby/buildkit v0.11.4 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.0-rc4 - github.com/opencontainers/umoci v0.0.0-00000000000000-000000000000 + github.com/opencontainers/umoci v0.4.8-0.20220412065115-12453f247749 github.com/pkg/errors v0.9.1 github.com/pkg/xattr v0.4.9 github.com/sirupsen/logrus v1.9.3 @@ -41,6 +38,12 @@ require ( stackerbuild.io/stacker-bom v0.0.0-00010101000000-000000000000 ) +require ( + github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/freddierice/go-losetup v0.0.0-20220711213114-2a14873012db // indirect + github.com/martinjungblut/go-cryptsetup v0.0.0-20220520180014-fd0874fd07a6 // indirect +) + require ( dario.cat/mergo v1.0.0 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect @@ -274,6 +277,7 @@ require ( gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + machinerun.io/atomfs v1.1.1 modernc.org/libc v1.37.6 // indirect modernc.org/mathutil v1.6.0 // indirect modernc.org/memory v1.7.2 // indirect diff --git a/go.sum b/go.sum index d6ad53ca..ca9fd677 100644 --- a/go.sum +++ b/go.sum @@ -1592,6 +1592,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +machinerun.io/atomfs v1.1.1 h1:EprTiYMzAlKL+3S7woe9DsCJGwO2dkHTlvmjlVNO8pY= +machinerun.io/atomfs v1.1.1/go.mod h1:cidyEmsNeeo+9f7OiHl/nA+8KS7Vj5XOslR87VkIebM= modernc.org/libc v1.37.6 h1:orZH3c5wmhIQFTXF+Nt+eeauyd+ZIt2BX6ARe+kD+aw= modernc.org/libc v1.37.6/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE= modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= diff --git a/pkg/atomfs/molecule.go b/pkg/atomfs/molecule.go deleted file mode 100644 index cd484567..00000000 --- a/pkg/atomfs/molecule.go +++ /dev/null @@ -1,266 +0,0 @@ -package atomfs - -import ( - "os" - "path" - "path/filepath" - "strings" - - ispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "golang.org/x/sys/unix" - "stackerbuild.io/stacker/pkg/mount" - "stackerbuild.io/stacker/pkg/squashfs" -) - -type Molecule struct { - // Atoms is the list of atoms in this Molecule. The first element in - // this list is the top most layer in the overlayfs. - Atoms []ispec.Descriptor - - config MountOCIOpts -} - -// mountUnderlyingAtoms mounts all the underlying atoms at -// config.MountedAtomsPath(). -func (m Molecule) mountUnderlyingAtoms() error { - // in the case that we have a verity or other mount error we need to - // tear down the other underlying atoms so we don't leave verity and loop - // devices around unused. 
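// Editorial sketch (not part of this patch): the unwind-on-error pattern the
// comment above describes — remember each atom that mounted successfully and
// tear them all down if a later mount fails, so no verity or loop devices are
// leaked. mountOne/unmountOne are hypothetical stand-ins for
// squashfs.Mount/squashfs.Umount; assumes a stdlib "fmt" import.
func mountAllOrUnwind(targets []string, mountOne, unmountOne func(string) error) error {
	mounted := []string{}
	cleanup := func(cause error) error {
		for _, t := range mounted {
			if uerr := unmountOne(t); uerr != nil {
				return fmt.Errorf("unmounting %q while handling %v: %w", t, cause, uerr)
			}
		}
		return cause
	}
	for _, t := range targets {
		if err := mountOne(t); err != nil {
			return cleanup(err)
		}
		mounted = append(mounted, t)
	}
	return nil
}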
- atomsMounted := []string{} - cleanupAtoms := func(err error) error { - for _, target := range atomsMounted { - if umountErr := squashfs.Umount(target); umountErr != nil { - return errors.Wrapf(umountErr, "failed to unmount atom @ target %q while handling error: %s", target, err) - } - } - return err - } - - for _, a := range m.Atoms { - target := m.config.MountedAtomsPath(a.Digest.Encoded()) - - rootHash := a.Annotations[squashfs.VerityRootHashAnnotation] - - if !m.config.AllowMissingVerityData && rootHash == "" { - return errors.Errorf("%v is missing verity data", a.Digest) - } - - mounts, err := mount.ParseMounts("/proc/self/mountinfo") - if err != nil { - return err - } - - mountpoint, mounted := mounts.FindMount(target) - - if mounted { - if rootHash != "" { - err = squashfs.ConfirmExistingVerityDeviceHash(mountpoint.Source, - rootHash, - m.config.AllowMissingVerityData) - if err != nil { - return err - } - } - continue - } - - if err := os.MkdirAll(target, 0755); err != nil { - return err - } - - err = squashfs.Mount(m.config.AtomsPath(a.Digest.Encoded()), target, rootHash) - if err != nil { - return cleanupAtoms(err) - } - - atomsMounted = append(atomsMounted, target) - } - - return nil -} - -// overlayArgs - returns all of the mount options to pass to the kernel to -// actually mount this molecule. -// This function assumes read-only. It does not provide upperdir or workdir. -func (m Molecule) overlayArgs(dest string) (string, error) { - dirs := []string{} - for _, a := range m.Atoms { - target := m.config.MountedAtomsPath(a.Digest.Encoded()) - dirs = append(dirs, target) - } - - // overlay doesn't work with only one lowerdir and no upperdir. - // For consistency in that specific case we add a hack here. - // We create an empty directory called "workaround" in the mounts - // directory, and add that to lowerdir list. - if len(dirs) == 1 { - workaround := m.config.MountedAtomsPath("workaround") - if err := os.MkdirAll(workaround, 0755); err != nil { - return "", errors.Wrapf(err, "couldn't make workaround dir") - } - - dirs = append(dirs, workaround) - } - - // Note that in overlayfs, the first thing is the top most layer in the - // overlay. - mntOpts := "index=off,xino=on,userxattr,lowerdir=" + strings.Join(dirs, ":") - return mntOpts, nil -} - -// device mapper has no namespacing. if two different binaries invoke this code -// (for example, the stacker test suite), we want to be sure we don't both -// create or delete devices out from the other one when they have detected the -// device exists. so try to cooperate via this lock. 
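// Editorial sketch (not part of this patch): the flock(2)-based cooperation the
// comment above describes. The lock path mirrors advisoryLockPath below; assumes
// imports "os", "path/filepath" and "golang.org/x/sys/unix".
func withAdvisoryLock(do func() error) error {
	f, err := os.Create(filepath.Join(os.TempDir(), ".atomfs-lock"))
	if err != nil {
		return err
	}
	defer f.Close()
	// LOCK_EX blocks until any other holder (e.g. a concurrent stacker
	// invocation) releases the lock; it is dropped when the fd is closed.
	if err := unix.Flock(int(f.Fd()), unix.LOCK_EX); err != nil {
		return err
	}
	defer unix.Flock(int(f.Fd()), unix.LOCK_UN)
	return do()
}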
-var advisoryLockPath = path.Join(os.TempDir(), ".atomfs-lock") - -func makeLock(mountpoint string) (*os.File, error) { - lockfile, err := os.Create(advisoryLockPath) - if err == nil { - return lockfile, nil - } - // backup plan: lock the destination as ${path}.atomfs-lock - mountpoint = strings.TrimSuffix(mountpoint, "/") - lockPath := filepath.Join(mountpoint, ".atomfs-lock") - var err2 error - lockfile, err2 = os.Create(lockPath) - if err2 == nil { - return lockfile, nil - } - - err = errors.Errorf("Failed locking %s: %v\nFailed locking %s: %v", advisoryLockPath, err, lockPath, err2) - return lockfile, err -} - -func (m Molecule) Mount(dest string) error { - lockfile, err := makeLock(dest) - if err != nil { - return errors.WithStack(err) - } - defer lockfile.Close() - - err = unix.Flock(int(lockfile.Fd()), unix.LOCK_EX) - if err != nil { - return errors.WithStack(err) - } - - mntOpts, err := m.overlayArgs(dest) - if err != nil { - return err - } - - // The kernel doesn't allow mount options longer than 4096 chars, so - // let's give a nicer error than -EINVAL here. - if len(mntOpts) > 4096 { - return errors.Errorf("too many lower dirs; must have fewer than 4096 chars") - } - - err = m.mountUnderlyingAtoms() - if err != nil { - return err - } - - // now, do the actual overlay mount - err = unix.Mount("overlay", dest, "overlay", 0, mntOpts) - return errors.Wrapf(err, "couldn't do overlay mount to %s, opts: %s", dest, mntOpts) -} - -func Umount(dest string) error { - var err error - dest, err = filepath.Abs(dest) - if err != nil { - return errors.Wrapf(err, "couldn't create abs path for %v", dest) - } - - lockfile, err := makeLock(dest) - if err != nil { - return errors.WithStack(err) - } - defer lockfile.Close() - - err = unix.Flock(int(lockfile.Fd()), unix.LOCK_EX) - if err != nil { - return errors.WithStack(err) - } - - mounts, err := mount.ParseMounts("/proc/self/mountinfo") - if err != nil { - return err - } - - underlyingAtoms := []string{} - for _, m := range mounts { - if m.FSType != "overlay" { - continue - } - - if m.Target != dest { - continue - } - - underlyingAtoms, err = m.GetOverlayDirs() - if err != nil { - return err - } - break - } - - if len(underlyingAtoms) == 0 { - return errors.Errorf("%s is not an atomfs mountpoint", dest) - } - - if err := unix.Unmount(dest, 0); err != nil { - return err - } - - // now, "refcount" the remaining atoms and see if any of ours are - // unused - usedAtoms := map[string]int{} - - mounts, err = mount.ParseMounts("/proc/self/mountinfo") - if err != nil { - return err - } - - for _, m := range mounts { - if m.FSType != "overlay" { - continue - } - - dirs, err := m.GetOverlayDirs() - if err != nil { - return err - } - for _, d := range dirs { - usedAtoms[d]++ - } - } - - // If any of the atoms underlying the target mountpoint are now unused, - // let's unmount them too. 
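// Editorial sketch (not part of this patch): the refcount-by-lowerdir technique
// Umount uses here — count how often each atom still appears as a lowerdir of
// any remaining overlay mount, and unmount only the atoms of this molecule whose
// count is zero. unmountAtom is a hypothetical stand-in for squashfs.Umount.
func unmountUnusedAtoms(remainingLowerdirs [][]string, ourAtoms []string, unmountAtom func(string) error) error {
	used := map[string]int{}
	for _, dirs := range remainingLowerdirs {
		for _, d := range dirs {
			used[d]++
		}
	}
	for _, a := range ourAtoms {
		if used[a] > 0 {
			continue // still referenced by another mounted molecule
		}
		if err := unmountAtom(a); err != nil {
			return err
		}
	}
	return nil
}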
- for _, a := range underlyingAtoms { - _, used := usedAtoms[a] - if used { - continue - } - /* TODO: some kind of logging - if !used { - log.Warnf("unused atom %s was part of this molecule?") - continue - } - */ - - // the workaround dir isn't really a mountpoint, so don't unmount it - if path.Base(a) == "workaround" { - continue - } - - err = squashfs.Umount(a) - if err != nil { - return err - } - } - - return nil -} diff --git a/pkg/atomfs/molecule_test.go b/pkg/atomfs/molecule_test.go deleted file mode 100644 index e60ce159..00000000 --- a/pkg/atomfs/molecule_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package atomfs - -import ( - "fmt" - "testing" - - digest "github.com/opencontainers/go-digest" - ispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/stretchr/testify/assert" -) - -func TestAllowMissingVerityData(t *testing.T) { - assert := assert.New(t) - - // no root hash annotations on this descriptor... - const hash = "73cd1a9ab86defeb5e22151ceb96b347fc58b4318f64be05046c51d407a364eb" - d := digest.NewDigestFromEncoded(digest.Algorithm("sha256"), hash) - mol := Molecule{ - Atoms: []ispec.Descriptor{ispec.Descriptor{Digest: d}}, - } - - err := mol.mountUnderlyingAtoms() - assert.NotNil(err) - assert.Equal(fmt.Sprintf("sha256:%s is missing verity data", hash), err.Error()) -} diff --git a/pkg/atomfs/oci.go b/pkg/atomfs/oci.go deleted file mode 100644 index c3acbbc4..00000000 --- a/pkg/atomfs/oci.go +++ /dev/null @@ -1,59 +0,0 @@ -package atomfs - -import ( - "path" - - ispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/opencontainers/umoci" - stackeroci "stackerbuild.io/stacker/pkg/oci" -) - -type MountOCIOpts struct { - OCIDir string - MetadataPath string - Tag string - Target string - AllowMissingVerityData bool -} - -func (c MountOCIOpts) AtomsPath(parts ...string) string { - atoms := path.Join(c.OCIDir, "blobs", "sha256") - return path.Join(append([]string{atoms}, parts...)...) -} - -func (c MountOCIOpts) MountedAtomsPath(parts ...string) string { - mounts := path.Join(c.MetadataPath, "mounts") - return path.Join(append([]string{mounts}, parts...)...) -} - -func BuildMoleculeFromOCI(opts MountOCIOpts) (Molecule, error) { - oci, err := umoci.OpenLayout(opts.OCIDir) - if err != nil { - return Molecule{}, err - } - defer oci.Close() - - man, err := stackeroci.LookupManifest(oci, opts.Tag) - if err != nil { - return Molecule{}, err - } - - atoms := []ispec.Descriptor{} - atoms = append(atoms, man.Layers...) - - // The OCI spec says that the first layer should be the bottom most - // layer. In overlay it's the top most layer. Since the atomfs codebase - // is mostly a wrapper around overlayfs, let's keep things in our db in - // the same order that overlay expects them, i.e. the first layer is - // the top most. That means we need to reverse the order in which the - // atoms were inserted, because they were backwards. - // - // It's also terrible that golang doesn't have a reverse function, but - // that's a discussion for a different block comment. 
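// Editorial aside (not part of this patch): the swap loop below reverses the
// layer list in place so that index 0 becomes the overlay's top-most lowerdir.
// Since go.mod targets Go 1.21, the standard library now offers an equivalent;
// ispec is the image-spec alias this file already imports.
func reverseDescriptors(atoms []ispec.Descriptor) {
	slices.Reverse(atoms) // assumes: import "slices"; the bottom-most OCI layer ends up last
}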
- for i := len(atoms)/2 - 1; i >= 0; i-- { - opp := len(atoms) - 1 - i - atoms[i], atoms[opp] = atoms[opp], atoms[i] - } - - return Molecule{Atoms: atoms, config: opts}, nil -} diff --git a/pkg/lib/image_test.go b/pkg/lib/image_test.go index 8d620fef..db532a03 100644 --- a/pkg/lib/image_test.go +++ b/pkg/lib/image_test.go @@ -13,7 +13,7 @@ import ( "github.com/opencontainers/umoci/mutate" "github.com/opencontainers/umoci/oci/casext" "github.com/stretchr/testify/assert" - "stackerbuild.io/stacker/pkg/squashfs" + "machinerun.io/atomfs/squashfs" ) func createImage(dir string, tag string) error { diff --git a/pkg/mount/mountinfo.go b/pkg/mount/mountinfo.go deleted file mode 100644 index aa58b123..00000000 --- a/pkg/mount/mountinfo.go +++ /dev/null @@ -1,95 +0,0 @@ -package mount - -import ( - "bufio" - "os" - "strings" - - "github.com/pkg/errors" -) - -type Mount struct { - Source string - Target string - FSType string - Opts []string -} - -func (m Mount) GetOverlayDirs() ([]string, error) { - if m.FSType != "overlay" { - return nil, errors.Errorf("%s is not an overlayfs", m.Target) - } - - for _, opt := range m.Opts { - if !strings.HasPrefix(opt, "lowerdir=") { - continue - } - - return strings.Split(strings.TrimPrefix(opt, "lowerdir="), ":"), nil - } - - return nil, errors.Errorf("no lowerdirs found") -} - -type Mounts []Mount - -func (ms Mounts) FindMount(p string) (Mount, bool) { - for _, m := range ms { - if m.Target == p { - return m, true - } - } - - return Mount{}, false -} - -func ParseMounts(mountinfo string) (Mounts, error) { - f, err := os.Open(mountinfo) - if err != nil { - return nil, errors.Wrapf(err, "couldn't open %s", mountinfo) - } - defer f.Close() - - mounts := []Mount{} - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Text() - fields := strings.Fields(line) - mount := Mount{} - mount.Target = fields[4] - - for i := 5; i < len(fields); i++ { - if fields[i] != "-" { - continue - } - - mount.FSType = fields[i+1] - mount.Source = fields[i+2] - mount.Opts = strings.Split(fields[i+3], ",") - } - - mounts = append(mounts, mount) - } - - return mounts, nil -} - -func IsMountpoint(target string) (bool, error) { - _, mounted, err := FindMount(target) - return mounted, err -} - -func FindMount(target string) (Mount, bool, error) { - mounts, err := ParseMounts("/proc/self/mountinfo") - if err != nil { - return Mount{}, false, err - } - - for _, mount := range mounts { - if mount.Target == strings.TrimRight(target, "/") { - return mount, true, nil - } - } - - return Mount{}, false, nil -} diff --git a/pkg/oci/oci.go b/pkg/oci/oci.go deleted file mode 100644 index 78eb7885..00000000 --- a/pkg/oci/oci.go +++ /dev/null @@ -1,78 +0,0 @@ -package oci - -import ( - "context" - - ispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/opencontainers/umoci/oci/casext" - "github.com/pkg/errors" -) - -func LookupManifest(oci casext.Engine, tag string) (ispec.Manifest, error) { - descriptorPaths, err := oci.ResolveReference(context.Background(), tag) - if err != nil { - return ispec.Manifest{}, err - } - - if len(descriptorPaths) != 1 { - return ispec.Manifest{}, errors.Errorf("bad descriptor %s", tag) - } - - blob, err := oci.FromDescriptor(context.Background(), descriptorPaths[0].Descriptor()) - if err != nil { - return ispec.Manifest{}, err - } - defer blob.Close() - - if blob.Descriptor.MediaType != ispec.MediaTypeImageManifest { - return ispec.Manifest{}, errors.Errorf("descriptor does not point to a manifest: %s", blob.Descriptor.MediaType) - } - - 
return blob.Data.(ispec.Manifest), nil -} - -func LookupConfig(oci casext.Engine, desc ispec.Descriptor) (ispec.Image, error) { - configBlob, err := oci.FromDescriptor(context.Background(), desc) - if err != nil { - return ispec.Image{}, err - } - - if configBlob.Descriptor.MediaType != ispec.MediaTypeImageConfig { - return ispec.Image{}, errors.Errorf("bad image config type: %s", configBlob.Descriptor.MediaType) - } - - return configBlob.Data.(ispec.Image), nil - -} - -// UpdateImageConfig updates an oci tag with new config and new manifest -func UpdateImageConfig(oci casext.Engine, name string, newConfig ispec.Image, newManifest ispec.Manifest) (ispec.Descriptor, error) { - configDigest, configSize, err := oci.PutBlobJSON(context.Background(), newConfig) - if err != nil { - return ispec.Descriptor{}, err - } - - newManifest.Config = ispec.Descriptor{ - MediaType: ispec.MediaTypeImageConfig, - Digest: configDigest, - Size: configSize, - } - - manifestDigest, manifestSize, err := oci.PutBlobJSON(context.Background(), newManifest) - if err != nil { - return ispec.Descriptor{}, err - } - - desc := ispec.Descriptor{ - MediaType: ispec.MediaTypeImageManifest, - Digest: manifestDigest, - Size: manifestSize, - } - - err = oci.UpdateReference(context.Background(), name, desc) - if err != nil { - return ispec.Descriptor{}, err - } - - return desc, nil -} diff --git a/pkg/overlay/metadata.go b/pkg/overlay/metadata.go index 43906a2a..27f4daa6 100644 --- a/pkg/overlay/metadata.go +++ b/pkg/overlay/metadata.go @@ -10,8 +10,8 @@ import ( ispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/umoci/oci/casext" "github.com/pkg/errors" + stackeroci "machinerun.io/atomfs/oci" "stackerbuild.io/stacker/pkg/log" - stackeroci "stackerbuild.io/stacker/pkg/oci" "stackerbuild.io/stacker/pkg/types" ) diff --git a/pkg/overlay/pack.go b/pkg/overlay/pack.go index 94df76c2..a9b30fbc 100644 --- a/pkg/overlay/pack.go +++ b/pkg/overlay/pack.go @@ -22,10 +22,10 @@ import ( "github.com/opencontainers/umoci/oci/layer" "github.com/pkg/errors" "github.com/pkg/xattr" + stackeroci "machinerun.io/atomfs/oci" + "machinerun.io/atomfs/squashfs" "stackerbuild.io/stacker/pkg/lib" "stackerbuild.io/stacker/pkg/log" - stackeroci "stackerbuild.io/stacker/pkg/oci" - "stackerbuild.io/stacker/pkg/squashfs" "stackerbuild.io/stacker/pkg/storage" "stackerbuild.io/stacker/pkg/types" ) @@ -382,7 +382,7 @@ func stripOverlayAttrsUnder(dirPath string) error { }) } -func generateLayer(config types.StackerConfig, oci casext.Engine, mutators []*mutate.Mutator, +func generateLayer(config types.StackerConfig, _ casext.Engine, mutators []*mutate.Mutator, name string, layer types.Layer, layerTypes []types.LayerType, ) (bool, error) { dir := path.Join(config.RootFSDir, name, "overlay") diff --git a/pkg/squashfs/mediatype.go b/pkg/squashfs/mediatype.go deleted file mode 100644 index 051fe9bc..00000000 --- a/pkg/squashfs/mediatype.go +++ /dev/null @@ -1,37 +0,0 @@ -package squashfs - -import ( - "fmt" - "strings" -) - -type SquashfsCompression string -type VerityMetadata bool - -const ( - BaseMediaTypeLayerSquashfs = "application/vnd.stacker.image.layer.squashfs" - - GzipCompression SquashfsCompression = "gzip" - ZstdCompression SquashfsCompression = "zstd" - - veritySuffix = "verity" - - VerityMetadataPresent VerityMetadata = true - VerityMetadataMissing VerityMetadata = false -) - -func IsSquashfsMediaType(mediaType string) bool { - return strings.HasPrefix(mediaType, BaseMediaTypeLayerSquashfs) -} - -func 
GenerateSquashfsMediaType(comp SquashfsCompression, verity VerityMetadata) string { - verityString := "" - if verity { - verityString = fmt.Sprintf("+%s", veritySuffix) - } - return fmt.Sprintf("%s+%s%s", BaseMediaTypeLayerSquashfs, comp, verityString) -} - -func HasVerityMetadata(mediaType string) VerityMetadata { - return VerityMetadata(strings.HasSuffix(mediaType, veritySuffix)) -} diff --git a/pkg/squashfs/squashfs.go b/pkg/squashfs/squashfs.go deleted file mode 100644 index c562a732..00000000 --- a/pkg/squashfs/squashfs.go +++ /dev/null @@ -1,744 +0,0 @@ -// This package is a small go "library" (read: exec wrapper) around the -// mksquashfs binary that provides some useful primitives. -package squashfs - -import ( - "bytes" - "fmt" - "io" - "os" - "os/exec" - "path" - "path/filepath" - "strings" - "sync" - "syscall" - "time" - - "github.com/Masterminds/semver/v3" - "github.com/pkg/errors" - "golang.org/x/sys/unix" - "stackerbuild.io/stacker/pkg/log" - "stackerbuild.io/stacker/pkg/mount" -) - -var checkZstdSupported sync.Once -var zstdIsSuspported bool - -var exPolInfo struct { - once sync.Once - err error - policy *ExtractPolicy -} - -// ExcludePaths represents a list of paths to exclude in a squashfs listing. -// Users should do something like filepath.Walk() over the whole filesystem, -// calling AddExclude() or AddInclude() based on whether they want to include -// or exclude a particular file. Note that if e.g. /usr is excluded, then -// everyting underneath is also implicitly excluded. The -// AddExclude()/AddInclude() methods do the math to figure out what is the -// correct set of things to exclude or include based on what paths have been -// previously included or excluded. -type ExcludePaths struct { - exclude map[string]bool - include []string -} - -type squashFuseInfoStruct struct { - Path string - Version string - SupportsNotfiy bool -} - -var once sync.Once -var squashFuseInfo = squashFuseInfoStruct{"", "", false} - -func NewExcludePaths() *ExcludePaths { - return &ExcludePaths{ - exclude: map[string]bool{}, - include: []string{}, - } -} - -func (eps *ExcludePaths) AddExclude(p string) { - for _, inc := range eps.include { - // If /usr/bin/ls has changed but /usr hasn't, we don't want to list - // /usr in the include paths any more, so let's be sure to only - // add things which aren't prefixes. - if strings.HasPrefix(inc, p) { - return - } - } - eps.exclude[p] = true -} - -func (eps *ExcludePaths) AddInclude(orig string, isDir bool) { - // First, remove this thing and all its parents from exclude. - p := orig - - // normalize to the first dir - if !isDir { - p = path.Dir(p) - } - for { - // our paths are all absolute, so this is a base case - if p == "/" { - break - } - - delete(eps.exclude, p) - p = filepath.Dir(p) - } - - // now add it to the list of includes, so we don't accidentally re-add - // anything above. 
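// Editorial sketch (not part of this patch): the parent walk AddInclude performs
// above — every ancestor of an included (absolute) path is removed from the
// exclude set so the path stays reachable. Assumes import "path/filepath".
func ancestors(p string) []string {
	out := []string{}
	for p != "/" { // paths here are absolute, so "/" is the base case
		p = filepath.Dir(p)
		out = append(out, p)
	}
	return out
}
// ancestors("/usr/bin/ls") == []string{"/usr/bin", "/usr", "/"}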
- eps.include = append(eps.include, orig) -} - -func (eps *ExcludePaths) String() (string, error) { - var buf bytes.Buffer - for p := range eps.exclude { - _, err := buf.WriteString(p) - if err != nil { - return "", err - } - _, err = buf.WriteString("\n") - if err != nil { - return "", err - } - } - - _, err := buf.WriteString("\n") - if err != nil { - return "", err - } - - return buf.String(), nil -} - -func MakeSquashfs(tempdir string, rootfs string, eps *ExcludePaths, verity VerityMetadata) (io.ReadCloser, string, string, error) { - var excludesFile string - var err error - var toExclude string - var rootHash string - - if eps != nil { - toExclude, err = eps.String() - if err != nil { - return nil, "", rootHash, errors.Wrapf(err, "couldn't create exclude path list") - } - } - - if len(toExclude) != 0 { - excludes, err := os.CreateTemp(tempdir, "stacker-squashfs-exclude-") - if err != nil { - return nil, "", rootHash, err - } - defer os.Remove(excludes.Name()) - - excludesFile = excludes.Name() - _, err = excludes.WriteString(toExclude) - excludes.Close() - if err != nil { - return nil, "", rootHash, err - } - } - - tmpSquashfs, err := os.CreateTemp(tempdir, "stacker-squashfs-img-") - if err != nil { - return nil, "", rootHash, err - } - tmpSquashfs.Close() - os.Remove(tmpSquashfs.Name()) - defer os.Remove(tmpSquashfs.Name()) - args := []string{rootfs, tmpSquashfs.Name()} - compression := GzipCompression - if mksquashfsSupportsZstd() { - args = append(args, "-comp", "zstd") - compression = ZstdCompression - } - if len(toExclude) != 0 { - args = append(args, "-ef", excludesFile) - } - cmd := exec.Command("mksquashfs", args...) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err = cmd.Run(); err != nil { - return nil, "", rootHash, errors.Wrap(err, "couldn't build squashfs") - } - - if verity { - rootHash, err = appendVerityData(tmpSquashfs.Name()) - if err != nil { - return nil, "", rootHash, err - } - } - - blob, err := os.Open(tmpSquashfs.Name()) - if err != nil { - return nil, "", rootHash, errors.WithStack(err) - } - - return blob, GenerateSquashfsMediaType(compression, verity), rootHash, nil -} - -func isMountedAtDir(src, dest string) (bool, error) { - dstat, err := os.Stat(dest) - if os.IsNotExist(err) { - return false, nil - } - if !dstat.IsDir() { - return false, nil - } - mounts, err := mount.ParseMounts("/proc/self/mountinfo") - if err != nil { - return false, err - } - - fdest, err := filepath.Abs(dest) - if err != nil { - return false, err - } - for _, m := range mounts { - if m.Target == fdest { - return true, nil - } - } - - return false, nil -} - -func findSquashFuseInfo() { - var sqfsPath string - if p := which("squashfuse_ll"); p != "" { - sqfsPath = p - } else { - sqfsPath = which("squashfuse") - } - if sqfsPath == "" { - return - } - version, supportsNotify := sqfuseSupportsMountNotification(sqfsPath) - log.Infof("Found squashfuse at %s (version=%s notify=%t)", sqfsPath, version, supportsNotify) - squashFuseInfo = squashFuseInfoStruct{sqfsPath, version, supportsNotify} -} - -// sqfuseSupportsMountNotification - returns true if squashfuse supports mount -// notification, false otherwise -// sqfuse is the path to the squashfuse binary -func sqfuseSupportsMountNotification(sqfuse string) (string, bool) { - cmd := exec.Command(sqfuse) - - // `squashfuse` always returns an error... so we ignore it. 
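// Editorial sketch (not part of this patch): the version gate applied below.
// squashfuse's usage output starts with a line of roughly "squashfuse <version> ...",
// and mount notification is assumed from 0.5.0 onwards. Assumes imports
// "strings" and "github.com/Masterminds/semver/v3".
func versionSupportsNotify(firstLine string) bool {
	fields := strings.Fields(firstLine)
	if len(fields) < 2 {
		return false
	}
	v, err := semver.NewVersion(fields[1])
	if err != nil {
		return false
	}
	c, err := semver.NewConstraint(">= 0.5.0")
	if err != nil {
		return false
	}
	return c.Check(v)
}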
- out, _ := cmd.CombinedOutput() - - firstLine := strings.Split(string(out[:]), "\n")[0] - version := strings.Split(firstLine, " ")[1] - v, err := semver.NewVersion(version) - if err != nil { - return version, false - } - // squashfuse notify mechanism was merged in 0.5.0 - constraint, err := semver.NewConstraint(">= 0.5.0") - if err != nil { - return version, false - } - if constraint.Check(v) { - return version, true - } - return version, false -} - -var squashNotFound = errors.Errorf("squashfuse program not found") - -// squashFuse - mount squashFile to extractDir -// return a pointer to the squashfuse cmd. -// The caller of the this is responsible for the process created. -func squashFuse(squashFile, extractDir string) (*exec.Cmd, error) { - var cmd *exec.Cmd - - once.Do(findSquashFuseInfo) - if squashFuseInfo.Path == "" { - return cmd, squashNotFound - } - - notifyOpts := "" - notifyPath := "" - if squashFuseInfo.SupportsNotfiy { - sockdir, err := os.MkdirTemp("", "sock") - if err != nil { - return cmd, err - } - defer os.RemoveAll(sockdir) - notifyPath = filepath.Join(sockdir, "notifypipe") - if err := syscall.Mkfifo(notifyPath, 0640); err != nil { - return cmd, err - } - notifyOpts = "notify_pipe=" + notifyPath - } - - // given extractDir of path/to/some/dir[/], log to path/to/some/.dir-squashfs.log - extractDir = strings.TrimSuffix(extractDir, "/") - - var cmdOut io.Writer - var err error - - logf := filepath.Join(path.Dir(extractDir), "."+filepath.Base(extractDir)+"-squashfuse.log") - if cmdOut, err = os.OpenFile(logf, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0644); err != nil { - log.Infof("Failed to open %s for write: %v", logf, err) - return cmd, err - } - - fiPre, err := os.Lstat(extractDir) - if err != nil { - return cmd, errors.Wrapf(err, "Failed stat'ing %q", extractDir) - } - if fiPre.Mode()&os.ModeSymlink != 0 { - return cmd, errors.Errorf("Refusing to mount onto a symbolic linkd") - } - - // It would be nice to only enable debug (or maybe to only log to file at all) - // if 'stacker --debug', but we do not have access to that info here. - // to debug squashfuse, use "allow_other,debug" - optionArgs := "allow_other,debug" - if notifyOpts != "" { - optionArgs += "," + notifyOpts - } - cmd = exec.Command(squashFuseInfo.Path, "-f", "-o", optionArgs, squashFile, extractDir) - cmd.Stdin = nil - cmd.Stdout = cmdOut - cmd.Stderr = cmdOut - cmdOut.Write([]byte(fmt.Sprintf("# %s\n", strings.Join(cmd.Args, " ")))) - log.Debugf("Extracting %s -> %s with %s [%s]", squashFile, extractDir, squashFuseInfo.Path, logf) - err = cmd.Start() - if err != nil { - return cmd, err - } - - // now poll/wait for one of 3 things to happen - // a. child process exits - if it did, then some error has occurred. - // b. the directory Entry is different than it was before the call - // to sqfuse. We have to do this because we do not have another - // way to know when the mount has been populated. - // https://github.com/vasi/squashfuse/issues/49 - // c. 
a timeout (timeLimit) was hit - startTime := time.Now() - timeLimit := 30 * time.Second - alarmCh := make(chan struct{}) - go func() { - cmd.Wait() - close(alarmCh) - }() - if squashFuseInfo.SupportsNotfiy { - notifyCh := make(chan byte) - log.Infof("%s supports notify pipe, watching %q", squashFuseInfo.Path, notifyPath) - go func() { - f, err := os.Open(notifyPath) - if err != nil { - return - } - defer f.Close() - b1 := make([]byte, 1) - for { - n1, err := f.Read(b1) - if err != nil { - return - } - if err == nil && n1 >= 1 { - break - } - } - notifyCh <- b1[0] - }() - if err != nil { - return cmd, errors.Wrapf(err, "Failed reading %q", notifyPath) - } - - select { - case <-alarmCh: - cmd.Process.Kill() - return cmd, errors.Wrapf(err, "Gave up on squashFuse mount of %s with %s after %s", squashFile, squashFuseInfo.Path, timeLimit) - case ret := <-notifyCh: - if ret == 's' { - return cmd, nil - } else { - return cmd, errors.Errorf("squashfuse returned an error, check %s", logf) - } - } - } - for count := 0; !fileChanged(fiPre, extractDir); count++ { - if cmd.ProcessState != nil { - // process exited, the Wait() call in the goroutine above - // caused ProcessState to be populated. - return cmd, errors.Errorf("squashFuse mount of %s with %s exited unexpectedly with %d", squashFile, squashFuseInfo.Path, cmd.ProcessState.ExitCode()) - } - if time.Since(startTime) > timeLimit { - cmd.Process.Kill() - return cmd, errors.Wrapf(err, "Gave up on squashFuse mount of %s with %s after %s", squashFile, squashFuseInfo.Path, timeLimit) - } - if count%10 == 1 { - log.Debugf("%s is not yet mounted...(%s)", extractDir, time.Since(startTime)) - } - time.Sleep(time.Duration(50 * time.Millisecond)) - } - - return cmd, nil -} - -type ExtractPolicy struct { - Extractors []SquashExtractor - Extractor SquashExtractor - Excuses map[string]error - initialized bool - mutex sync.Mutex -} - -type SquashExtractor interface { - Name() string - IsAvailable() error - // Mount - Mount or extract path to dest. - // Return nil on "already extracted" - // Return error on failure. 
- Mount(path, dest string) error -} - -func NewExtractPolicy(args ...string) (*ExtractPolicy, error) { - p := &ExtractPolicy{ - Extractors: []SquashExtractor{}, - Excuses: map[string]error{}, - } - - allEx := []SquashExtractor{ - &KernelExtractor{}, - &SquashFuseExtractor{}, - &UnsquashfsExtractor{}, - } - byName := map[string]SquashExtractor{} - for _, i := range allEx { - byName[i.Name()] = i - } - - for _, i := range args { - extractor, ok := byName[i] - if !ok { - return nil, errors.Errorf("Unknown extractor: '%s'", i) - } - excuse := extractor.IsAvailable() - if excuse != nil { - p.Excuses[i] = excuse - continue - } - p.Extractors = append(p.Extractors, extractor) - } - return p, nil -} - -type UnsquashfsExtractor struct { - mutex sync.Mutex -} - -func (k *UnsquashfsExtractor) Name() string { - return "unsquashfs" -} - -func (k *UnsquashfsExtractor) IsAvailable() error { - if which("unsquashfs") == "" { - return errors.Errorf("no 'unsquashfs' in PATH") - } - return nil -} - -func (k *UnsquashfsExtractor) Mount(squashFile, extractDir string) error { - k.mutex.Lock() - defer k.mutex.Unlock() - - // check if already extracted - empty, err := isEmptyDir(extractDir) - if err != nil { - return errors.Wrapf(err, "Error checking for empty dir") - } - if !empty { - return nil - } - - log.Debugf("unsquashfs %s -> %s", squashFile, extractDir) - cmd := exec.Command("unsquashfs", "-f", "-d", extractDir, squashFile) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.Stdin = nil - err = cmd.Run() - - // on failure, remove the directory - if err != nil { - if rmErr := os.RemoveAll(extractDir); rmErr != nil { - log.Errorf("Failed to remove %s after failed extraction of %s: %v", extractDir, squashFile, rmErr) - } - return err - } - - // assert that extraction must create files. This way we can assume non-empty dir above - // was populated by unsquashfs. - empty, err = isEmptyDir(extractDir) - if err != nil { - return errors.Errorf("Failed to read %s after successful extraction of %s: %v", - extractDir, squashFile, err) - } - if empty { - return errors.Errorf("%s was an empty fs image", squashFile) - } - - return nil -} - -type KernelExtractor struct { - mutex sync.Mutex -} - -func (k *KernelExtractor) Name() string { - return "kmount" -} - -func (k *KernelExtractor) IsAvailable() error { - if !amHostRoot() { - return errors.Errorf("not host root") - } - return nil -} - -func (k *KernelExtractor) Mount(squashFile, extractDir string) error { - k.mutex.Lock() - defer k.mutex.Unlock() - - if mounted, err := isMountedAtDir(squashFile, extractDir); err != nil { - return err - } else if mounted { - return nil - } - - ecmd := []string{"mount", "-tsquashfs", "-oloop,ro", squashFile, extractDir} - var output bytes.Buffer - cmd := exec.Command(ecmd[0], ecmd[1:]...) 
- cmd.Stdin = nil - cmd.Stdout = &output - cmd.Stderr = cmd.Stdout - err := cmd.Run() - if err == nil { - return nil - } - - var retErr error - - exitError, ok := err.(*exec.ExitError) - if !ok { - retErr = errors.Errorf("kmount(%s) had unexpected error (no-rc), in exec (%v): %v", - squashFile, ecmd, err) - } else if status, ok := exitError.Sys().(syscall.WaitStatus); !ok { - retErr = errors.Errorf("kmount(%s) had unexpected error (no-status), in exec (%v): %v", - squashFile, ecmd, err) - } else { - retErr = errors.Errorf("kmount(%s) exited %d: %v", squashFile, status.ExitStatus(), output.String()) - } - - return retErr -} - -type SquashFuseExtractor struct { - mutex sync.Mutex -} - -func (k *SquashFuseExtractor) Name() string { - return "squashfuse" -} - -func (k *SquashFuseExtractor) IsAvailable() error { - once.Do(findSquashFuseInfo) - if squashFuseInfo.Path == "" { - return errors.Errorf("no 'squashfuse' in PATH") - } - return nil -} - -func (k *SquashFuseExtractor) Mount(squashFile, extractDir string) error { - k.mutex.Lock() - defer k.mutex.Unlock() - - if mounted, err := isMountedAtDir(squashFile, extractDir); mounted && err == nil { - log.Debugf("[%s] %s already mounted -> %s", k.Name(), squashFile, extractDir) - return nil - } else if err != nil { - return err - } - - cmd, err := squashFuse(squashFile, extractDir) - if err != nil { - return err - } - - log.Debugf("squashFuse mounted (%d) %s -> %s", cmd.Process.Pid, squashFile, extractDir) - if err := cmd.Process.Release(); err != nil { - return errors.Errorf("Failed to release process %s: %v", cmd, err) - } - return nil -} - -// ExtractSingleSquashPolicy - extract squashfile to extractDir -func ExtractSingleSquashPolicy(squashFile, extractDir string, policy *ExtractPolicy) error { - const initName = "init" - if policy == nil { - return errors.Errorf("policy cannot be nil") - } - - // avoid taking a lock if already initialized (possibly premature optimization) - if !policy.initialized { - policy.mutex.Lock() - // We may have been waiting on the initializer. If so, then the policy will now be initialized. - // if not, then we are the initializer. - if !policy.initialized { - defer policy.mutex.Unlock() - defer func() { - policy.initialized = true - }() - } else { - policy.mutex.Unlock() - } - } - - err := os.MkdirAll(extractDir, 0755) - if err != nil { - return err - } - - fdest, err := filepath.Abs(extractDir) - if err != nil { - return err - } - - if policy.initialized { - if err, ok := policy.Excuses[initName]; ok { - return err - } - return policy.Extractor.Mount(squashFile, fdest) - } - - // At this point we are the initialzer - if policy.Excuses == nil { - policy.Excuses = map[string]error{} - } - - if len(policy.Extractors) == 0 { - policy.Excuses[initName] = errors.Errorf("policy had no extractors") - return policy.Excuses[initName] - } - - var extractor SquashExtractor - allExcuses := []string{} - for _, extractor = range policy.Extractors { - err = extractor.Mount(squashFile, fdest) - if err == nil { - policy.Extractor = extractor - log.Debugf("Selected squashfs extractor %s", extractor.Name()) - return nil - } - policy.Excuses[extractor.Name()] = err - } - - for n, exc := range policy.Excuses { - allExcuses = append(allExcuses, fmt.Sprintf("%s: %v", n, exc)) - } - - // nothing worked. 
populate Excuses[initName] - policy.Excuses[initName] = errors.Errorf("No suitable extractor found:\n %s", strings.Join(allExcuses, "\n ")) - return policy.Excuses[initName] -} - -// ExtractSingleSquash - extract the squashFile to extractDir -// Initialize a extractPolicy struct and then call ExtractSingleSquashPolicy -// wik()th that. -func ExtractSingleSquash(squashFile string, extractDir string) error { - exPolInfo.once.Do(func() { - const envName = "STACKER_SQUASHFS_EXTRACT_POLICY" - const defPolicy = "kmount squashfuse unsquashfs" - val := os.Getenv(envName) - if val == "" { - val = defPolicy - } - exPolInfo.policy, exPolInfo.err = NewExtractPolicy(strings.Fields(val)...) - if exPolInfo.err == nil { - for k, v := range exPolInfo.policy.Excuses { - log.Debugf(" squashfs extractor %s is not available: %v", k, v) - } - } - }) - - if exPolInfo.err != nil { - return exPolInfo.err - } - - return ExtractSingleSquashPolicy(squashFile, extractDir, exPolInfo.policy) -} - -func mksquashfsSupportsZstd() bool { - checkZstdSupported.Do(func() { - var stdoutBuffer strings.Builder - var stderrBuffer strings.Builder - - cmd := exec.Command("mksquashfs", "--help") - cmd.Stdout = &stdoutBuffer - cmd.Stderr = &stderrBuffer - - // Ignore errs here as `mksquashfs --help` exit status code is 1 - _ = cmd.Run() - - if strings.Contains(stdoutBuffer.String(), "zstd") || - strings.Contains(stderrBuffer.String(), "zstd") { - zstdIsSuspported = true - } - }) - - return zstdIsSuspported -} - -func isEmptyDir(path string) (bool, error) { - fh, err := os.Open(path) - if err != nil { - return false, err - } - - _, err = fh.ReadDir(1) - if err == io.EOF { - return true, nil - } - return false, err -} - -// which - like the unix utility, return empty string for not-found. -// this might fit well in lib/, but currently lib's test imports -// squashfs creating a import loop. -func which(name string) string { - return whichSearch(name, strings.Split(os.Getenv("PATH"), ":")) -} - -func whichSearch(name string, paths []string) string { - var search []string - - if strings.ContainsRune(name, os.PathSeparator) { - if filepath.IsAbs(name) { - search = []string{name} - } else { - search = []string{"./" + name} - } - } else { - search = []string{} - for _, p := range paths { - search = append(search, filepath.Join(p, name)) - } - } - - for _, fPath := range search { - if err := unix.Access(fPath, unix.X_OK); err == nil { - return fPath - } - } - - return "" -} diff --git a/pkg/squashfs/superblock.go b/pkg/squashfs/superblock.go deleted file mode 100644 index a733ab54..00000000 --- a/pkg/squashfs/superblock.go +++ /dev/null @@ -1,263 +0,0 @@ -/* -This file was initially copied from go-diskfs [1]. The copied portion is -Copyright (c) 2017 Avi Deitcher and licensed under the terms of the MIT -license [2]. 
- - [1] https://github.com/diskfs/go-diskfs/filesystem/squashfs/superblock.go - [2] https://opensource.org/licenses/MIT -*/ -package squashfs - -import ( - "encoding/binary" - "io" - "math" - "os" - "time" - - "github.com/pkg/errors" -) - -const ( - superblockMagic uint32 = 0x73717368 - superblockMajorVersion uint16 = 4 - superblockMinorVersion uint16 = 0 -) - -type compression uint16 - -// nolint:unused // copied from diskfs -const ( - compressionNone compression = 0 - compressionGzip compression = 1 - compressionLzma compression = 2 - compressionLzo compression = 3 - compressionXz compression = 4 - compressionLz4 compression = 5 - compressionZstd compression = 6 -) - -const ( - superblockSize = 96 -) - -type inodeRef struct { - block uint32 - offset uint16 -} - -// nolint:unused // copied from diskfs -func (i *inodeRef) toUint64() uint64 { - var u uint64 - u |= (uint64(i.block) << 16) - u |= uint64(i.offset) - return u -} - -func parseRootInode(u uint64) *inodeRef { - i := &inodeRef{ - block: uint32((u >> 16) & 0xffffffff), - offset: uint16(u & 0xffff), - } - return i -} - -type superblockFlags struct { - uncompressedInodes bool - uncompressedData bool - uncompressedFragments bool - noFragments bool - alwaysFragments bool - dedup bool - exportable bool - uncompressedXattrs bool - noXattrs bool - compressorOptions bool - uncompressedIDs bool -} - -type superblock struct { - inodes uint32 - modTime time.Time - blocksize uint32 - fragmentCount uint32 - compression compression - idCount uint16 - versionMajor uint16 - versionMinor uint16 - rootInode *inodeRef - size uint64 - idTableStart uint64 - xattrTableStart uint64 - inodeTableStart uint64 - directoryTableStart uint64 - fragmentTableStart uint64 - exportTableStart uint64 - superblockFlags -} - -// nolint:unused // copied from diskfs -func (s *superblock) equal(a *superblock) bool { - // to compare, need to extract the rootInode - inodeEql := *a.rootInode == *s.rootInode - s1 := &superblock{} - a1 := &superblock{} - *s1 = *s - *a1 = *a - s1.rootInode = nil - a1.rootInode = nil - modTime := time.Now() - s1.modTime = modTime - a1.modTime = modTime - sblockEql := *s1 == *a1 - return inodeEql && sblockEql -} - -// nolint:unused // copied from diskfs -func (s *superblockFlags) bytes() []byte { - var flags uint16 - if s.uncompressedInodes { - flags |= 0x0001 - } - if s.uncompressedData { - flags |= 0x0002 - } - if s.uncompressedFragments { - flags |= 0x0008 - } - if s.noFragments { - flags |= 0x0010 - } - if s.alwaysFragments { - flags |= 0x0020 - } - if s.dedup { - flags |= 0x0040 - } - if s.exportable { - flags |= 0x0080 - } - if s.uncompressedXattrs { - flags |= 0x0100 - } - if s.noXattrs { - flags |= 0x0200 - } - if s.compressorOptions { - flags |= 0x0400 - } - if s.uncompressedIDs { - flags |= 0x0800 - } - b := make([]byte, 2) - binary.LittleEndian.PutUint16(b, flags) - return b -} - -func parseFlags(b []byte) (*superblockFlags, error) { - targetLength := 2 - if len(b) != targetLength { - return nil, errors.Errorf("received %d bytes instead of expected %d", len(b), targetLength) - } - flags := binary.LittleEndian.Uint16(b) - s := &superblockFlags{ - uncompressedInodes: flags&0x0001 == 0x0001, - uncompressedData: flags&0x0002 == 0x0002, - uncompressedFragments: flags&0x0008 == 0x0008, - noFragments: flags&0x0010 == 0x0010, - alwaysFragments: flags&0x0020 == 0x0020, - dedup: flags&0x0040 == 0x0040, - exportable: flags&0x0080 == 0x0080, - uncompressedXattrs: flags&0x0100 == 0x0100, - noXattrs: flags&0x0200 == 0x0200, - compressorOptions: 
flags&0x0400 == 0x0400, - uncompressedIDs: flags&0x0800 == 0x0800, - } - return s, nil -} - -// nolint:unused // copied from diskfs -func (s *superblock) toBytes() []byte { - b := make([]byte, superblockSize) - binary.LittleEndian.PutUint32(b[0:4], superblockMagic) - binary.LittleEndian.PutUint32(b[4:8], s.inodes) - binary.LittleEndian.PutUint32(b[8:12], uint32(s.modTime.Unix())) - binary.LittleEndian.PutUint32(b[12:16], s.blocksize) - binary.LittleEndian.PutUint32(b[16:20], s.fragmentCount) - binary.LittleEndian.PutUint16(b[20:22], uint16(s.compression)) - binary.LittleEndian.PutUint16(b[22:24], uint16(math.Log2(float64(s.blocksize)))) - copy(b[24:26], s.superblockFlags.bytes()) - binary.LittleEndian.PutUint16(b[26:28], s.idCount) - binary.LittleEndian.PutUint16(b[28:30], superblockMajorVersion) - binary.LittleEndian.PutUint16(b[30:32], superblockMinorVersion) - binary.LittleEndian.PutUint64(b[32:40], s.rootInode.toUint64()) - binary.LittleEndian.PutUint64(b[40:48], s.size) - binary.LittleEndian.PutUint64(b[48:56], s.idTableStart) - binary.LittleEndian.PutUint64(b[56:64], s.xattrTableStart) - binary.LittleEndian.PutUint64(b[64:72], s.inodeTableStart) - binary.LittleEndian.PutUint64(b[72:80], s.directoryTableStart) - binary.LittleEndian.PutUint64(b[80:88], s.fragmentTableStart) - binary.LittleEndian.PutUint64(b[88:96], s.exportTableStart) - return b -} - -func parseSuperblock(b []byte) (*superblock, error) { - if len(b) != superblockSize { - return nil, errors.Errorf("superblock had %d bytes instead of expected %d", len(b), superblockSize) - } - magic := binary.LittleEndian.Uint32(b[0:4]) - if magic != superblockMagic { - return nil, errors.Errorf("superblock had magic of %d instead of expected %d", magic, superblockMagic) - } - majorVersion := binary.LittleEndian.Uint16(b[28:30]) - minorVersion := binary.LittleEndian.Uint16(b[30:32]) - if majorVersion != superblockMajorVersion || minorVersion != superblockMinorVersion { - return nil, errors.Errorf("superblock version mismatch, received %d.%d instead of expected %d.%d", majorVersion, minorVersion, superblockMajorVersion, superblockMinorVersion) - } - - blocksize := binary.LittleEndian.Uint32(b[12:16]) - blocklog := binary.LittleEndian.Uint16(b[22:24]) - expectedLog := uint16(math.Log2(float64(blocksize))) - if expectedLog != blocklog { - return nil, errors.Errorf("superblock block log mismatch, actual %d expected %d", blocklog, expectedLog) - } - flags, err := parseFlags(b[24:26]) - if err != nil { - return nil, errors.Errorf("error parsing flags bytes: %v", err) - } - s := &superblock{ - inodes: binary.LittleEndian.Uint32(b[4:8]), - modTime: time.Unix(int64(binary.LittleEndian.Uint32(b[8:12])), 0), - blocksize: blocksize, - fragmentCount: binary.LittleEndian.Uint32(b[16:20]), - compression: compression(binary.LittleEndian.Uint16(b[20:22])), - idCount: binary.LittleEndian.Uint16(b[26:28]), - versionMajor: binary.LittleEndian.Uint16(b[28:30]), - versionMinor: binary.LittleEndian.Uint16(b[30:32]), - rootInode: parseRootInode(binary.LittleEndian.Uint64(b[32:40])), - size: binary.LittleEndian.Uint64(b[40:48]), - idTableStart: binary.LittleEndian.Uint64(b[48:56]), - xattrTableStart: binary.LittleEndian.Uint64(b[56:64]), - inodeTableStart: binary.LittleEndian.Uint64(b[64:72]), - directoryTableStart: binary.LittleEndian.Uint64(b[72:80]), - fragmentTableStart: binary.LittleEndian.Uint64(b[80:88]), - exportTableStart: binary.LittleEndian.Uint64(b[88:96]), - superblockFlags: *flags, - } - return s, nil -} - -func readSuperblock(path string) 
(*superblock, error) { - reader, err := os.Open(path) - if err != nil { - return nil, err - } - defer reader.Close() - - buf := make([]byte, superblockSize) - if _, err := io.ReadFull(reader, buf); err != nil { - return nil, err - } - - return parseSuperblock(buf) -} diff --git a/pkg/squashfs/verity.go b/pkg/squashfs/verity.go deleted file mode 100644 index 388ca0bd..00000000 --- a/pkg/squashfs/verity.go +++ /dev/null @@ -1,522 +0,0 @@ -package squashfs - -// #cgo pkg-config: libcryptsetup devmapper --static -// #include -// #include -// #include -// #include -/* -int get_verity_params(char *device, char **params) -{ - struct dm_task *dmt; - struct dm_info dmi; - int r; - uint64_t start, length; - char *type, *tmpParams; - - dmt = dm_task_create(DM_DEVICE_TABLE); - if (!dmt) - return 1; - - r = 2; - if (!dm_task_secure_data(dmt)) - goto out; - - r = 3; - if (!dm_task_set_name(dmt, device)) - goto out; - - r = 4; - if (!dm_task_run(dmt)) - goto out; - - r = 5; - if (!dm_task_get_info(dmt, &dmi)) - goto out; - - r = 6; - if (!dmi.exists) - goto out; - - r = 7; - if (dmi.target_count <= 0) - goto out; - - r = 8; - dm_get_next_target(dmt, NULL, &start, &length, &type, &tmpParams); - if (!type) - goto out; - - r = 9; - if (strcasecmp(type, CRYPT_VERITY)) { - fprintf(stderr, "type: %s (%s) %d\n", type, CRYPT_VERITY, strcmp(type, CRYPT_VERITY)); - goto out; - } - *params = strdup(tmpParams); - - r = 0; -out: - dm_task_destroy(dmt); - return r; -} -*/ -import "C" - -import ( - "encoding/hex" - "fmt" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "syscall" - "unsafe" - - "github.com/freddierice/go-losetup" - "github.com/martinjungblut/go-cryptsetup" - "github.com/pkg/errors" - "golang.org/x/sys/unix" - "stackerbuild.io/stacker/pkg/mount" -) - -const VerityRootHashAnnotation = "io.stackeroci.stacker.squashfs_verity_root_hash" - -type verityDeviceType struct { - Flags uint - DataDevice string - HashOffset uint64 -} - -func (verity verityDeviceType) Name() string { - return C.CRYPT_VERITY -} - -func (verity verityDeviceType) Unmanaged() (unsafe.Pointer, func()) { - var cParams C.struct_crypt_params_verity - - cParams.hash_name = C.CString("sha256") - cParams.data_device = C.CString(verity.DataDevice) - cParams.fec_device = nil - cParams.fec_roots = 0 - - cParams.salt_size = 32 // DEFAULT_VERITY_SALT_SIZE for x86 - cParams.salt = nil - - // these can't be larger than a page size, but we want them to be as - // big as possible so the hash data is small, so let's set them to a - // page size. - cParams.data_block_size = C.uint(os.Getpagesize()) - cParams.hash_block_size = C.uint(os.Getpagesize()) - - cParams.data_size = C.ulong(verity.HashOffset / uint64(os.Getpagesize())) - cParams.hash_area_offset = C.ulong(verity.HashOffset) - cParams.fec_area_offset = 0 - cParams.hash_type = 1 // use format version 1 (i.e. 
"modern", non chrome-os) - cParams.flags = C.uint(verity.Flags) - - deallocate := func() { - C.free(unsafe.Pointer(cParams.hash_name)) - C.free(unsafe.Pointer(cParams.data_device)) - } - - return unsafe.Pointer(&cParams), deallocate -} - -func isCryptsetupEINVAL(err error) bool { - cse, ok := err.(*cryptsetup.Error) - return ok && cse.Code() == -22 -} - -var cryptsetupTooOld = errors.Errorf("libcryptsetup not new enough, need >= 2.3.0") - -func appendVerityData(file string) (string, error) { - fi, err := os.Lstat(file) - if err != nil { - return "", errors.WithStack(err) - } - - verityOffset := fi.Size() - - // we expect mksquashfs to have padded the file to the nearest 4k - // (dm-verity requires device block size, which is 512 for loopback, - // which is a multiple of 4k), let's check that here - if verityOffset%512 != 0 { - return "", errors.Errorf("bad verity file size %d", verityOffset) - } - - verityDevice, err := cryptsetup.Init(file) - if err != nil { - return "", errors.WithStack(err) - } - - verityType := verityDeviceType{ - Flags: cryptsetup.CRYPT_VERITY_CREATE_HASH, - DataDevice: file, - HashOffset: uint64(verityOffset), - } - err = verityDevice.Format(verityType, cryptsetup.GenericParams{}) - if err != nil { - return "", errors.WithStack(err) - } - - // a bit ugly, but this is the only API for querying the root - // hash (short of invoking the veritysetup binary), and it was - // added in libcryptsetup commit 188cb114af94 ("Add support for - // verity in crypt_volume_key_get and use it in status"), which - // is relatively recent (ubuntu 20.04 does not have this patch, - // for example). - // - // before that, we get a -22. so, let's test for that and - // render a special error message. - rootHash, _, err := verityDevice.VolumeKeyGet(cryptsetup.CRYPT_ANY_SLOT, "") - if isCryptsetupEINVAL(err) { - return "", cryptsetupTooOld - } else if err != nil { - return "", err - } - - return fmt.Sprintf("%x", rootHash), errors.WithStack(err) -} - -func verityDataLocation(sblock *superblock) (uint64, error) { - squashLen := sblock.size - - // squashfs is padded out to the nearest 4k - if squashLen%4096 != 0 { - squashLen = squashLen + (4096 - squashLen%4096) - } - - return squashLen, nil -} - -func verityName(p string) string { - return fmt.Sprintf("%s-%s", p, veritySuffix) -} - -func fileChanged(a os.FileInfo, path string) bool { - b, err := os.Lstat(path) - if err != nil { - return true - } - return !os.SameFile(a, b) -} - -// Mount a filesystem as container root, without host root -// privileges. We do this using squashfuse. 
-func GuestMount(squashFile string, mountpoint string) error { - if isMountpoint(mountpoint) { - return errors.Errorf("%s is already mounted", mountpoint) - } - - abs, err := filepath.Abs(squashFile) - if err != nil { - return errors.Errorf("Failed to get absolute path for %s: %v", squashFile, err) - } - squashFile = abs - - abs, err = filepath.Abs(mountpoint) - if err != nil { - return errors.Errorf("Failed to get absolute path for %s: %v", mountpoint, err) - } - mountpoint = abs - - cmd, err := squashFuse(squashFile, mountpoint) - if err != nil { - return err - } - if err := cmd.Process.Release(); err != nil { - return errors.Errorf("Failed to release process after guestmount %s: %v", squashFile, err) - } - return nil -} - -func isMountpoint(dest string) bool { - mounted, err := mount.IsMountpoint(dest) - return err == nil && mounted -} - -// Takes /proc/self/uid_map contents as one string -// Returns true if this is a uidmap representing the whole host -// uid range. -func uidmapIsHost(oneline string) bool { - oneline = strings.TrimSuffix(oneline, "\n") - if len(oneline) == 0 { - return false - } - lines := strings.Split(oneline, "\n") - if len(lines) != 1 { - return false - } - words := strings.Fields(lines[0]) - if len(words) != 3 || words[0] != "0" || words[1] != "0" || words[2] != "4294967295" { - return false - } - - return true -} - -func amHostRoot() bool { - // if not uid 0, not host root - if os.Geteuid() != 0 { - return false - } - // if uid_map doesn't map 0 to 0, not host root - bytes, err := os.ReadFile("/proc/self/uid_map") - if err != nil { - return false - } - return uidmapIsHost(string(bytes)) -} - -func Mount(squashfs, mountpoint, rootHash string) error { - if !amHostRoot() { - return GuestMount(squashfs, mountpoint) - } - err := HostMount(squashfs, mountpoint, rootHash) - if err == nil || rootHash != "" { - return err - } - return GuestMount(squashfs, mountpoint) -} - -func HostMount(squashfs string, mountpoint string, rootHash string) error { - fi, err := os.Stat(squashfs) - if err != nil { - return errors.WithStack(err) - } - - sblock, err := readSuperblock(squashfs) - if err != nil { - return err - } - - verityOffset, err := verityDataLocation(sblock) - if err != nil { - return err - } - - if verityOffset == uint64(fi.Size()) && rootHash != "" { - return errors.Errorf("asked for verity but no data present") - } - - if rootHash == "" && verityOffset != uint64(fi.Size()) { - return errors.Errorf("verity data present but no root hash specified") - } - - mountSourcePath := "" - - var verityDevice *cryptsetup.Device - name := verityName(path.Base(squashfs)) - - loopDevNeedsClosedOnErr := false - var loopDev losetup.Device - - // set up the verity device if necessary - if rootHash != "" { - verityDevPath := path.Join("/dev/mapper", name) - mountSourcePath = verityDevPath - _, err = os.Stat(verityDevPath) - if err != nil { - if !os.IsNotExist(err) { - return errors.WithStack(err) - } - - loopDev, err = losetup.Attach(squashfs, 0, true) - if err != nil { - return errors.WithStack(err) - } - loopDevNeedsClosedOnErr = true - - verityDevice, err = cryptsetup.Init(loopDev.Path()) - if err != nil { - return errors.WithStack(err) - } - - verityType := verityDeviceType{ - Flags: 0, - DataDevice: loopDev.Path(), - HashOffset: verityOffset, - } - - err = verityDevice.Load(verityType) - if err != nil { - loopDev.Detach() - return errors.WithStack(err) - } - - // each string byte hex encodes four bits of info... 
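// Editorial sketch (not part of this patch): the arithmetic the comment above
// alludes to — each hex character encodes four bits, so a 64-character sha256
// root hash decodes to 64*4/8 = 32 bytes of key material. Assumes import
// "encoding/hex".
func decodeRootHash(rootHash string) ([]byte, int, error) {
	keyBytes := len(rootHash) * 4 / 8
	raw, err := hex.DecodeString(rootHash)
	return raw, keyBytes, err
}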
- volumeKeySizeInBytes := len(rootHash) * 4 / 8
- rootHashBytes, err := hex.DecodeString(rootHash)
- if err != nil {
- loopDev.Detach()
- return errors.WithStack(err)
- }
-
- if len(rootHashBytes) != volumeKeySizeInBytes {
- loopDev.Detach()
- return errors.Errorf("unexpected key size for %s", rootHash)
- }
-
- err = verityDevice.ActivateByVolumeKey(name, string(rootHashBytes), volumeKeySizeInBytes, cryptsetup.CRYPT_ACTIVATE_READONLY)
- if err != nil {
- loopDev.Detach()
- return errors.WithStack(err)
- }
- } else {
- err = ConfirmExistingVerityDeviceHash(verityDevPath, rootHash, rejectVerityFailure)
- if err != nil {
- return err
- }
- }
- } else {
- loopDev, err = losetup.Attach(squashfs, 0, true)
- if err != nil {
- return errors.WithStack(err)
- }
- defer loopDev.Detach()
- mountSourcePath = loopDev.Path()
-
- }
-
- err = errors.WithStack(unix.Mount(mountSourcePath, mountpoint, "squashfs", unix.MS_RDONLY, ""))
- if err != nil {
- if verityDevice != nil {
- verityDevice.Deactivate(name)
- loopDev.Detach()
- }
- if loopDevNeedsClosedOnErr {
- loopDev.Detach()
- }
- return err
- }
- return nil
-}
-
-func findLoopBackingVerity(device string) (int64, error) {
- fi, err := os.Stat(device)
- if err != nil {
- return -1, errors.WithStack(err)
- }
-
- var minor uint32
- switch stat := fi.Sys().(type) {
- case *unix.Stat_t:
- minor = unix.Minor(uint64(stat.Rdev))
- case *syscall.Stat_t:
- minor = unix.Minor(uint64(stat.Rdev))
- default:
- return -1, errors.Errorf("unknown stat info type %T", stat)
- }
-
- ents, err := os.ReadDir(fmt.Sprintf("/sys/block/dm-%d/slaves", minor))
- if err != nil {
- return -1, errors.WithStack(err)
- }
-
- if len(ents) != 1 {
- return -1, errors.Errorf("too many slaves for %v", device)
- }
- loop := ents[0]
-
- deviceNo, err := strconv.ParseInt(strings.TrimPrefix(filepath.Base(loop.Name()), "loop"), 10, 64)
- if err != nil {
- return -1, errors.Wrapf(err, "bad loop dev %v", loop.Name())
- }
-
- return deviceNo, nil
-}
-
-func Umount(mountpoint string) error {
- mounts, err := mount.ParseMounts("/proc/self/mountinfo")
- if err != nil {
- return err
- }
-
- // first, find the verity device that backs the mount
- theMount, found := mounts.FindMount(mountpoint)
- if !found {
- return errors.Errorf("%s is not a mountpoint", mountpoint)
- }
-
- err = unix.Unmount(mountpoint, 0)
- if err != nil {
- return errors.Wrapf(err, "failed unmounting %v", mountpoint)
- }
-
- if _, err := os.Stat(theMount.Source); err != nil {
- if os.IsNotExist(err) {
- return nil
- }
- return errors.WithStack(err)
- }
-
- // was this a verity mount or a regular loopback mount? (if it's a
- // regular loopback mount, we detached it above, so we don't need to do
- // anything special here; verity doesn't play as nicely)
- if strings.HasSuffix(theMount.Source, veritySuffix) {
- // find the loop device that backs the verity device
- deviceNo, err := findLoopBackingVerity(theMount.Source)
- if err != nil {
- return err
- }
-
- loopDev := losetup.New(uint64(deviceNo), 0)
- // here, we don't have the loopback device any more (we detached it
- // above). the cryptsetup API allows us to pass NULL for the crypt
- // device, but go-cryptsetup doesn't have a way to initialize a NULL
- // crypt device short of making the struct by hand like this.
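// (For reference, the underlying C call is crypt_deactivate(NULL, name);
// deactivation only needs the device-mapper name, so a zero-value
// cryptsetup.Device{} is enough to reach it through go-cryptsetup.)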
- err = (&cryptsetup.Device{}).Deactivate(theMount.Source)
- if err != nil {
- return errors.WithStack(err)
- }
-
- // finally, kill the loop dev
- err = loopDev.Detach()
- if err != nil {
- return errors.Wrapf(err, "failed to detach loop dev for %v", theMount.Source)
- }
- }
-
- return nil
-}
-
-// If we are using squashfuse, then we will be unable to get the verity hash
-// from the mount device. This is not a safe thing, so we only allow it when
-// the device was mounted originally with AllowMissingVerityData.
-
-const (
- rejectVerityFailure = false
- allowVerityFailure = true
-)
-
-func ConfirmExistingVerityDeviceHash(devicePath string, rootHash string, allowVerityFailure bool) error {
- device := filepath.Base(devicePath)
- cDevice := C.CString(device)
- defer C.free(unsafe.Pointer(cDevice))
-
- var cParams *C.char
-
- rc := C.get_verity_params(cDevice, &cParams)
- if rc != 0 {
- if allowVerityFailure {
- return nil
- }
- return errors.Errorf("problem getting hash from %v: %v", device, rc)
- }
- defer C.free(unsafe.Pointer(cParams))
-
- params := C.GoString(cParams)
-
- // https://gitlab.com/cryptsetup/cryptsetup/-/wikis/DMVerity
- fields := strings.Fields(params)
- if len(fields) < 10 {
- return errors.Errorf("invalid dm params for %v: %v", device, params)
- }
-
- if rootHash != fields[8] {
- return errors.Errorf("invalid root hash for %v: %v (expected: %v)", device, fields[8], rootHash)
- }
-
- return nil
-}
diff --git a/pkg/squashfs/verity_static.go b/pkg/squashfs/verity_static.go
deleted file mode 100644
index feac991b..00000000
--- a/pkg/squashfs/verity_static.go
+++ /dev/null
@@ -1,10 +0,0 @@
-//go:build static_build
-// +build static_build
-
-package squashfs
-
-// cryptsetup's pkgconfig is broken (it does not set Requires.private or
-// Libs.private at all), so we do the LDLIBS for it by hand.
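// (If cryptsetup's .pc file set Libs.private, a command along the lines of
//	pkg-config --static --libs libcryptsetup
// would emit this link line for us; since it does not, the LDFLAGS below are
// kept by hand and may need updating when cryptsetup's own dependencies
// change.)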
- -// #cgo LDFLAGS: -lcryptsetup -lcrypto -lssl -lblkid -luuid -ljson-c -lpthread -ldl -import "C" diff --git a/pkg/squashfs/verity_test.go b/pkg/squashfs/verity_test.go deleted file mode 100644 index 8ebdaa15..00000000 --- a/pkg/squashfs/verity_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package squashfs - -import ( - "fmt" - "io" - "os" - "os/exec" - "path" - "testing" - - "github.com/stretchr/testify/assert" -) - -type uidmapTestcase struct { - uidmap string - expected bool -} - -var uidmapTests = []uidmapTestcase{ - uidmapTestcase{ - uidmap: ` 0 0 4294967295`, - expected: true, - }, - uidmapTestcase{ - uidmap: ` 0 0 1000 -2000 2000 1`, - expected: false, - }, - uidmapTestcase{ - uidmap: ` 0 0 1000`, - expected: false, - }, - uidmapTestcase{ - uidmap: ` 10 0 4294967295`, - expected: false, - }, - uidmapTestcase{ - uidmap: ` 0 10 4294967295`, - expected: false, - }, - uidmapTestcase{ - uidmap: ` 0 0 1`, - expected: false, - }, -} - -func TestAmHostRoot(t *testing.T) { - assert := assert.New(t) - for _, testcase := range uidmapTests { - v := uidmapIsHost(testcase.uidmap) - assert.Equal(v, testcase.expected) - } -} - -func TestVerityMetadata(t *testing.T) { - assert := assert.New(t) - - rootfs, err := os.MkdirTemp("", "stacker_verity_test_rootfs") - assert.NoError(err) - defer os.RemoveAll(rootfs) - - tempdir, err := os.MkdirTemp("", "stacker_verity_test_tempdir") - assert.NoError(err) - defer os.RemoveAll(tempdir) - - err = os.WriteFile(path.Join(rootfs, "foo"), []byte("bar"), 0644) - assert.NoError(err) - - reader, _, rootHash, err := MakeSquashfs(tempdir, rootfs, nil, VerityMetadataPresent) - if err == cryptsetupTooOld { - t.Skip("libcryptsetup too old") - } - assert.NoError(err) - - content, err := io.ReadAll(reader) - assert.NoError(err) - squashfsFile := path.Join(tempdir, "foo.squashfs") - err = os.WriteFile(squashfsFile, content, 0600) - assert.NoError(err) - - sblock, err := readSuperblock(squashfsFile) - assert.NoError(err) - - verityOffset, err := verityDataLocation(sblock) - assert.NoError(err) - - // now let's try to verify it at least in userspace. exec cryptsetup - // because i'm lazy and it's only in tests - cmd := exec.Command("veritysetup", "verify", squashfsFile, squashfsFile, rootHash, - "--hash-offset", fmt.Sprintf("%d", verityOffset)) - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err = cmd.Run() - assert.NoError(err) - - // what if we fiddle with the verity data? note that we have to fiddle - // with the beginning of the verity block, which will be 4k long for - // our small squashfs file, because the stuff at the end of the verity - // block is unused. 
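// (Offset arithmetic, assuming the appended verity data occupies the final
// 4096 bytes of the file: seeking to -4087 from SEEK_END lands 9 bytes into
// that region, i.e. near its start where the hash data lives, not in the
// unused space at the end.)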
- const bytesToFlip = 2 - const flipAtOffset = -4087 - - f, err := os.OpenFile(squashfsFile, os.O_RDWR, 0644) - assert.NoError(err) - defer f.Close() - _, err = f.Seek(flipAtOffset, os.SEEK_END) - assert.NoError(err) - - buf := make([]byte, bytesToFlip) - n, err := f.Read(buf) - assert.Equal(n, bytesToFlip) - assert.NoError(err) - - for i := range buf { - buf[i] = buf[i] ^ 0xff - } - - _, err = f.Seek(flipAtOffset, os.SEEK_END) - assert.NoError(err) - n, err = f.Write(buf) - assert.Equal(n, bytesToFlip) - assert.NoError(err) - assert.NoError(f.Sync()) - assert.NoError(f.Close()) - - cmd = exec.Command("veritysetup", "verify", squashfsFile, squashfsFile, rootHash, - "--hash-offset", fmt.Sprintf("%d", verityOffset)) - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err = cmd.Run() - assert.Error(err) -} diff --git a/pkg/types/layer_type.go b/pkg/types/layer_type.go index 889e2e4b..8430a404 100644 --- a/pkg/types/layer_type.go +++ b/pkg/types/layer_type.go @@ -7,7 +7,7 @@ import ( ispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "stackerbuild.io/stacker/pkg/squashfs" + "machinerun.io/atomfs/squashfs" ) var ErrEmptyLayers = errors.New("empty layers")