diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 173e8646d19d..7a24a7dabf5e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -135,6 +135,56 @@ jobs: name: coverage path: ./coverage + test-nydus: + runs-on: ubuntu-20.04 + needs: [base] + strategy: + fail-fast: false + matrix: + pkg: + - ./client + worker: + - containerd + - oci + typ: + - integration + exclude: + - pkg: ./client ./cmd/buildctl ./worker/containerd ./solver ./frontend + typ: dockerfile + include: + - pkg: ./... + skip-integration-tests: 1 + typ: integration + steps: + - + name: Checkout + uses: actions/checkout@v3 + - + name: Expose GitHub Runtime + uses: crazy-max/ghaction-github-runtime@v2 + - + name: Set up QEMU + uses: docker/setup-qemu-action@v2 + - + name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + with: + version: ${{ env.BUILDX_VERSION }} + driver-opts: image=${{ env.REPO_SLUG_ORIGIN }} + buildkitd-flags: --debug + - + name: Test pkg=${{ matrix.pkg }} ; typ=${{ matrix.typ }} ; skipit=${{ matrix.skip-integration-tests }} ; worker=${{ matrix.worker }} + run: | + if [ -n "${{ matrix.worker }}" ]; then + export TESTFLAGS="${TESTFLAGS} --tags=nydus --run=//worker=${{ matrix.worker }}$" + fi + ./hack/test ${{ matrix.typ }} + env: + BUILDKITD_TAGS: nydus + TESTPKGS: ${{ matrix.pkg }} + SKIP_INTEGRATION_TESTS: ${{ matrix.skip-integration-tests }} + CACHE_FROM: type=gha,scope=${{ env.CACHE_GHA_SCOPE_IT }} type=gha,scope=${{ env.CACHE_GHA_SCOPE_BINARIES }} + test-s3: runs-on: ubuntu-20.04 needs: diff --git a/Dockerfile b/Dockerfile index 6f81dd98f948..e1d2de6a7125 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,6 +12,7 @@ ARG CNI_VERSION=v1.1.0 ARG STARGZ_SNAPSHOTTER_VERSION=v0.12.0 ARG NERDCTL_VERSION=v0.17.1 ARG DNSNAME_VERSION=v1.3.1 +ARG NYDUS_VERSION=v2.1.0 # ALPINE_VERSION sets version for the base layers ARG ALPINE_VERSION=3.15 @@ -192,6 +193,14 @@ RUN --mount=target=/root/.cache,type=cache \ xx-verify --static /out/containerd-stargz-grpc && \ xx-verify --static /out/ctr-remote +FROM gobuild-base AS nydus +ARG NYDUS_VERSION +ARG TARGETOS +ARG TARGETARCH +SHELL ["/bin/bash", "-c"] +RUN wget https://github.com/dragonflyoss/image-service/releases/download/$NYDUS_VERSION/nydus-static-$NYDUS_VERSION-$TARGETOS-$TARGETARCH.tgz +RUN mkdir -p /out/nydus-static && tar xzvf nydus-static-$NYDUS_VERSION-$TARGETOS-$TARGETARCH.tgz -C /out + FROM buildkit-export AS buildkit-linux COPY --link --from=binaries / /usr/bin/ ENTRYPOINT ["buildkitd"] @@ -235,6 +244,7 @@ RUN apk add --no-cache shadow shadow-uidmap sudo vim iptables ip6tables dnsmasq ENV BUILDKIT_INTEGRATION_CONTAINERD_EXTRA="containerd-1.4=/opt/containerd-alt-14/bin,containerd-1.5=/opt/containerd-alt-15/bin" ENV BUILDKIT_INTEGRATION_SNAPSHOTTER=stargz ENV CGO_ENABLED=0 +COPY --link --from=nydus /out/nydus-static/* /usr/bin/ COPY --link --from=stargz-snapshotter /out/* /usr/bin/ COPY --link --from=rootlesskit /rootlesskit /usr/bin/ COPY --link --from=containerd-alt-14 /out/containerd* /opt/containerd-alt-14/bin/ diff --git a/cache/blobs.go b/cache/blobs.go index 377d680acf27..817c687f4c39 100644 --- a/cache/blobs.go +++ b/cache/blobs.go @@ -86,7 +86,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool return nil, errors.WithStack(ErrNoBlobs) } - compressorFunc, finalize := comp.Type.Compress(comp) + compressorFunc, finalize := comp.Type.Compress(ctx, comp) mediaType := comp.Type.MediaType() var lowerRef *immutableRef diff --git a/cache/blobs_linux.go b/cache/blobs_linux.go 
index 37310ff3bad6..ce41275e6b74 100644 --- a/cache/blobs_linux.go +++ b/cache/blobs_linux.go @@ -58,11 +58,14 @@ func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper if err != nil { return emptyDesc, false, errors.Wrap(err, "failed to get compressed stream") } - err = overlay.WriteUpperdir(ctx, io.MultiWriter(compressed, dgstr.Hash()), upperdir, lower) - compressed.Close() - if err != nil { + // Close ensures compressorFunc performs any finalization work. + defer compressed.Close() + if err := overlay.WriteUpperdir(ctx, io.MultiWriter(compressed, dgstr.Hash()), upperdir, lower); err != nil { return emptyDesc, false, errors.Wrap(err, "failed to write compressed diff") } + if err := compressed.Close(); err != nil { + return emptyDesc, false, errors.Wrap(err, "failed to close compressed diff writer") + } if labels == nil { labels = map[string]string{} } diff --git a/cache/compression.go b/cache/compression.go new file mode 100644 index 000000000000..bede8d932278 --- /dev/null +++ b/cache/compression.go @@ -0,0 +1,16 @@ +//go:build !nydus +// +build !nydus + +package cache + +import ( + "context" + + "github.com/containerd/containerd/content" + "github.com/moby/buildkit/cache/config" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +func needsForceCompression(ctx context.Context, cs content.Store, source ocispecs.Descriptor, refCfg config.RefConfig) bool { + return refCfg.Compression.Force +} diff --git a/cache/compression_nydus.go b/cache/compression_nydus.go new file mode 100644 index 000000000000..2f5f10bd172c --- /dev/null +++ b/cache/compression_nydus.go @@ -0,0 +1,148 @@ +//go:build nydus +// +build nydus + +package cache + +import ( + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/moby/buildkit/cache/config" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/util/compression" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + + nydusify "github.com/containerd/nydus-snapshotter/pkg/converter" +) + +func init() { + additionalAnnotations = append( + additionalAnnotations, + nydusify.LayerAnnotationNydusBlob, nydusify.LayerAnnotationNydusBootstrap, nydusify.LayerAnnotationNydusBlobIDs, + ) +} + +// The nydus compression type can't be mixed with other compression types in the same image, +// so if `source` is a nydus layer but the target is another compression type, we +// must force compression. +func needsForceCompression(ctx context.Context, cs content.Store, source ocispecs.Descriptor, refCfg config.RefConfig) bool { + if refCfg.Compression.Force { + return true + } + isNydusBlob, _ := compression.Nydus.Is(ctx, cs, source) + if refCfg.Compression.Type == compression.Nydus { + return !isNydusBlob + } + return isNydusBlob +} + +// MergeNydus does two steps: +// 1. Extracts the nydus bootstrap from the nydus format (nydus blob + nydus bootstrap) of each layer. +// 2. Merges all nydus bootstraps into a final bootstrap (appended as an extra layer). +// The nydus bootstrap is very small, so the merge operation is fast.
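+// The returned descriptor describes the merged bootstrap layer, which the +// caller appends to the image manifest as the final layer (see patchImageLayers +// in exporter/containerimage/patch_nydus.go).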
+func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config, s session.Group) (*ocispecs.Descriptor, error) { + iref, ok := ref.(*immutableRef) + if !ok { + return nil, fmt.Errorf("unsupported ref") + } + refs := iref.layerChain() + if len(refs) == 0 { + return nil, fmt.Errorf("refs can't be empty") + } + + // Extracts nydus bootstrap from nydus format for each layer. + var cm *cacheManager + layers := []nydusify.Layer{} + blobIDs := []string{} + for _, ref := range refs { + blobDesc, err := getBlobWithCompressionWithRetry(ctx, ref, comp, s) + if err != nil { + return nil, errors.Wrapf(err, "get compression blob %q", comp.Type) + } + ra, err := ref.cm.ContentStore.ReaderAt(ctx, blobDesc) + if err != nil { + return nil, errors.Wrapf(err, "get reader for compression blob %q", comp.Type) + } + defer ra.Close() + if cm == nil { + cm = ref.cm + } + blobIDs = append(blobIDs, blobDesc.Digest.Hex()) + layers = append(layers, nydusify.Layer{ + Digest: blobDesc.Digest, + ReaderAt: ra, + }) + } + + // Merge all nydus bootstraps into a final nydus bootstrap. + pr, pw := io.Pipe() + go func() { + defer pw.Close() + if _, err := nydusify.Merge(ctx, layers, pw, nydusify.MergeOption{ + WithTar: true, + }); err != nil { + pw.CloseWithError(errors.Wrapf(err, "merge nydus bootstrap")) + } + }() + + // Compress final nydus bootstrap to tar.gz and write into content store. + cw, err := content.OpenWriter(ctx, cm.ContentStore, content.WithRef("nydus-merge-"+iref.getChainID().String())) + if err != nil { + return nil, errors.Wrap(err, "open content store writer") + } + defer cw.Close() + + gw := gzip.NewWriter(cw) + uncompressedDgst := digest.SHA256.Digester() + compressed := io.MultiWriter(gw, uncompressedDgst.Hash()) + if _, err := io.Copy(compressed, pr); err != nil { + return nil, errors.Wrapf(err, "copy bootstrap targz into content store") + } + if err := gw.Close(); err != nil { + return nil, errors.Wrap(err, "close gzip writer") + } + + compressedDgst := cw.Digest() + if err := cw.Commit(ctx, 0, compressedDgst, content.WithLabels(map[string]string{ + containerdUncompressed: uncompressedDgst.Digest().String(), + })); err != nil { + if !errdefs.IsAlreadyExists(err) { + return nil, errors.Wrap(err, "commit to content store") + } + } + if err := cw.Close(); err != nil { + return nil, errors.Wrap(err, "close content store writer") + } + + info, err := cm.ContentStore.Info(ctx, compressedDgst) + if err != nil { + return nil, errors.Wrap(err, "get info from content store") + } + + blobIDsBytes, err := json.Marshal(blobIDs) + if err != nil { + return nil, errors.Wrap(err, "marshal blob ids") + } + + desc := ocispecs.Descriptor{ + Digest: compressedDgst, + Size: info.Size, + MediaType: ocispecs.MediaTypeImageLayerGzip, + Annotations: map[string]string{ + containerdUncompressed: uncompressedDgst.Digest().String(), + // Use this annotation to identify nydus bootstrap layer. + nydusify.LayerAnnotationNydusBootstrap: "true", + // Track all blob digests for nydus snapshotter. 
+ nydusify.LayerAnnotationNydusBlobIDs: string(blobIDsBytes), + }, + } + + return &desc, nil +} diff --git a/cache/converter.go b/cache/converter.go index 8b9c22924b89..f19412b7086a 100644 --- a/cache/converter.go +++ b/cache/converter.go @@ -36,7 +36,7 @@ func getConverter(ctx context.Context, cs content.Store, desc ocispecs.Descripto } c := conversion{target: comp} - c.compress, c.finalize = comp.Type.Compress(comp) + c.compress, c.finalize = comp.Type.Compress(ctx, comp) c.decompress = from.Decompress return (&c).convert, nil diff --git a/cache/refs.go b/cache/refs.go index 37b3bf6d873e..2bb0ba115b6e 100644 --- a/cache/refs.go +++ b/cache/refs.go @@ -36,6 +36,8 @@ import ( "golang.org/x/sync/errgroup" ) +var additionalAnnotations = append(compression.EStargzAnnotations, containerdUncompressed) + // Ref is a reference to cacheable objects. type Ref interface { Mountable @@ -878,7 +880,7 @@ func filterAnnotationsForSave(a map[string]string) (b map[string]string) { if a == nil { return nil } - for _, k := range append(compression.EStargzAnnotations, containerdUncompressed) { + for _, k := range additionalAnnotations { v, ok := a[k] if !ok { continue diff --git a/cache/remote.go b/cache/remote.go index d4eb37f0bca6..843ad249700e 100644 --- a/cache/remote.go +++ b/cache/remote.go @@ -212,7 +212,7 @@ func (sr *immutableRef) getRemote(ctx context.Context, createIfNeeded bool, refC } } - if refCfg.Compression.Force { + if needsForceCompression(ctx, sr.cm.ContentStore, desc, refCfg) { if needs, err := refCfg.Compression.Type.NeedsConversion(ctx, sr.cm.ContentStore, desc); err != nil { return nil, err } else if needs { diff --git a/client/client_nydus_test.go b/client/client_nydus_test.go new file mode 100644 index 000000000000..dbe0e38b3970 --- /dev/null +++ b/client/client_nydus_test.go @@ -0,0 +1,139 @@ +//go:build nydus +// +build nydus + +package client + +import ( + "fmt" + "strconv" + "testing" + + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/namespaces" + nydusify "github.com/containerd/nydus-snapshotter/pkg/converter" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/util/compression" + "github.com/moby/buildkit/util/testutil/integration" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +func TestNydusIntegration(t *testing.T) { + testIntegration( + t, + testBuildExportNydusWithHybrid, + ) +} + +func testBuildExportNydusWithHybrid(t *testing.T, sb integration.Sandbox) { + integration.SkipIfDockerd(t, sb, "nydus build export") + requiresLinux(t) + + cdAddress := sb.ContainerdAddress() + if cdAddress == "" { + t.Skip("test requires containerd worker") + } + + client, err := newContainerd(cdAddress) + require.NoError(t, err) + defer client.Close() + registry, err := sb.NewRegistry() + if errors.Is(err, integration.ErrRequirements) { + t.Skip(err.Error()) + } + require.NoError(t, err) + + var ( + imageService = client.ImageService() + contentStore = client.ContentStore() + ctx = namespaces.WithNamespace(sb.Context(), "buildkit") + ) + + c, err := New(sb.Context(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + buildNydus := func(file string) { + orgImage := "docker.io/library/alpine:latest" + baseDef := llb.Image(orgImage).Run(llb.Args([]string{"/bin/touch", "/" + file})) + def, err := baseDef.Marshal(sb.Context()) + require.NoError(t, err) + + target := registry + "/nydus/alpine:" + identity.NewID() + _, 
err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + "compression": "nydus", + "oci-mediatypes": "true", + }, + }, + }, + }, nil) + require.NoError(t, err) + + img, err := imageService.Get(ctx, target) + require.NoError(t, err) + + manifest, err := images.Manifest(ctx, contentStore, img.Target, nil) + require.NoError(t, err) + + require.Equal(t, len(manifest.Layers), 3) + require.Equal(t, "true", manifest.Layers[0].Annotations[nydusify.LayerAnnotationNydusBlob]) + require.Equal(t, "true", manifest.Layers[1].Annotations[nydusify.LayerAnnotationNydusBlob]) + require.Equal(t, "true", manifest.Layers[2].Annotations[nydusify.LayerAnnotationNydusBootstrap]) + } + + buildOther := func(file string, compType compression.Type, forceCompression bool) { + orgImage := "docker.io/library/alpine:latest" + baseDef := llb.Image(orgImage).Run(llb.Args([]string{"/bin/touch", "/" + file})) + def, err := baseDef.Marshal(sb.Context()) + require.NoError(t, err) + + mediaTypes := map[compression.Type]string{ + compression.Gzip: ocispecs.MediaTypeImageLayerGzip, + compression.Zstd: ocispecs.MediaTypeImageLayer + "+zstd", + } + target := fmt.Sprintf("%s/%s/alpine:%s", registry, compType, identity.NewID()) + _, err = c.Solve(sb.Context(), def, SolveOpt{ + Exports: []ExportEntry{ + { + Type: ExporterImage, + Attrs: map[string]string{ + "name": target, + "push": "true", + "compression": compType.String(), + "oci-mediatypes": "true", + "force-compression": strconv.FormatBool(forceCompression), + }, + }, + }, + }, nil) + require.NoError(t, err) + + img, err := imageService.Get(ctx, target) + require.NoError(t, err) + + manifest, err := images.Manifest(ctx, contentStore, img.Target, nil) + require.NoError(t, err) + + require.Equal(t, 2, len(manifest.Layers)) + require.Equal(t, mediaTypes[compType], manifest.Layers[0].MediaType) + require.Equal(t, mediaTypes[compType], manifest.Layers[1].MediaType) + } + + // Make sure that the nydus compression layer is not mixed with other + // types of compression layers in an image. 
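+	// The two build orders below (nydus first, then gzip/zstd; and gzip/zstd +	// first, then nydus) exercise the forced compression path in both directions.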
+ buildNydus("foo") + buildOther("foo", compression.Gzip, false) + buildOther("foo", compression.Zstd, true) + + buildOther("bar", compression.Gzip, false) + buildOther("bar", compression.Zstd, true) + buildNydus("bar") +} diff --git a/client/client_test.go b/client/client_test.go index ead07cd53ffd..48b81779c4e3 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -79,12 +79,8 @@ type nopWriteCloser struct { func (nopWriteCloser) Close() error { return nil } func TestIntegration(t *testing.T) { - mirroredImages := integration.OfficialImages("busybox:latest", "alpine:latest") - mirroredImages["tonistiigi/test:nolayers"] = "docker.io/tonistiigi/test:nolayers" - mirroredImages["cpuguy83/buildkit-foreign:latest"] = "docker.io/cpuguy83/buildkit-foreign:latest" - mirrors := integration.WithMirroredImages(mirroredImages) - - tests := integration.TestFuncs( + testIntegration( + t, testCacheExportCacheKeyLoop, testRelativeWorkDir, testFileOpMkdirMkfile, @@ -186,6 +182,15 @@ func TestIntegration(t *testing.T) { testSBOMScan, testSBOMScanSingleRef, ) +} + +func testIntegration(t *testing.T, funcs ...func(t *testing.T, sb integration.Sandbox)) { + mirroredImages := integration.OfficialImages("busybox:latest", "alpine:latest") + mirroredImages["tonistiigi/test:nolayers"] = "docker.io/tonistiigi/test:nolayers" + mirroredImages["cpuguy83/buildkit-foreign:latest"] = "docker.io/cpuguy83/buildkit-foreign:latest" + mirrors := integration.WithMirroredImages(mirroredImages) + + tests := integration.TestFuncs(funcs...) tests = append(tests, diffOpTestCases()...) integration.Run(t, tests, mirrors) diff --git a/docs/nydus.md b/docs/nydus.md new file mode 100644 index 000000000000..709340b55d16 --- /dev/null +++ b/docs/nydus.md @@ -0,0 +1,47 @@ +## Nydus image formats + +Nydus is an OCI/Docker-compatible accelerated image format provided by the Dragonfly [image-service](https://github.com/dragonflyoss/image-service) project. It offers the ability to pull image data on demand, so a container can start without waiting for the entire image pull to complete. It has been used in production and has shown significant reductions in the time, network, and disk I/O overhead of pulling images and starting containers. + +A Nydus image can be flexibly configured as a FUSE-based user-space filesystem, or as the in-kernel [EROFS](https://www.kernel.org/doc/html/latest/filesystems/erofs.html) filesystem (available since Linux kernel v5.16) with the nydus daemon in user space, which also makes integration with VM-based container runtimes like [KataContainers](https://katacontainers.io/) much easier. + +## Creating Nydus images + +### Buildkitd with Nydus Support + +To enable BuildKit support for Nydus image export, build `buildkitd` with the following command: + +``` +go build -tags=nydus -o ./bin/buildkitd ./cmd/buildkitd +``` + +### Building Nydus with BuildKit + +Download the `nydus-image` binary from the [nydus release page](https://github.com/dragonflyoss/image-service/releases) (v2.1.0 or higher is required), then either put the `nydus-image` binary on `$PATH` or specify its path via the `NYDUS_BUILDER` environment variable for buildkitd: + +``` +env NYDUS_BUILDER=/path/to/nydus-image buildkitd ... +``` + +Note: some nydus intermediate files are created in the working directory during the build process; they are cleaned up automatically after the build completes. Use the `NYDUS_WORKDIR` environment variable to change this working directory.
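+ +For example, both variables can be combined when launching buildkitd (the paths below are placeholders, not defaults): + +``` +env NYDUS_BUILDER=/usr/local/bin/nydus-image NYDUS_WORKDIR=/var/tmp/nydus buildkitd +```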
+ +On the buildctl side, export a nydus image as one of the compression types by specifying the `compression=nydus` option: + +``` +buildctl build ... \ + --output type=image,name=docker.io/username/image,push=true,compression=nydus,oci-mediatypes=true +``` + +### Known limitations + +- Exporting Nydus images, and the runtimes that consume them (e.g. [docker](https://github.com/dragonflyoss/image-service/tree/master/contrib/docker-nydus-graphdriver), [containerd](https://github.com/containerd/nydus-snapshotter), etc.), are currently only supported on the Linux platform. +- Nydus image layers cannot be mixed with other compression types in the same image, so the `force-compression=true` option is automatically enabled when exporting with the Nydus compression type. +- Specifying a Nydus image as a base image in a Dockerfile is supported, but lazy pulling is not currently supported for it. +- Since an exported Nydus image always has one more metadata layer than images of other compression types, Nydus images cannot be exported/imported as cache. + +### Other ways to create Nydus images + +Pre-converted nydus images are available in the [`ghcr.io/dragonflyoss/image-service` repository](https://github.com/orgs/dragonflyoss/packages?ecosystem=container) (mainly for testing purposes). + +The [`Nydusify`](https://github.com/dragonflyoss/image-service/blob/master/docs/nydusify.md) CLI tool pulls and converts an OCIv1 image into a nydus image, then pushes the nydus image to a registry. + +[`Harbor Acceld`](https://github.com/goharbor/acceleration-service) provides a general service to convert OCIv1 images to accelerated image formats such as [Nydus](https://github.com/dragonflyoss/image-service) and [eStargz](https://github.com/containerd/stargz-snapshotter). diff --git a/exporter/containerimage/opts.go b/exporter/containerimage/opts.go index 1a0b2d87fb59..c6b0dc8ede4d 100644 --- a/exporter/containerimage/opts.go +++ b/exporter/containerimage/opts.go @@ -88,6 +88,10 @@ func (c *ImageCommitOpts) Load(opt map[string]string) (map[string]string, error) c.EnableOCITypes(c.RefCfg.Compression.Type.String()) } + if c.RefCfg.Compression.Type.NeedsForceCompression() { + c.EnableForceCompression(c.RefCfg.Compression.Type.String()) + } + c.AddAnnotations(as) return rest, nil @@ -120,6 +124,18 @@ func (c *ImageCommitOpts) EnableOCITypes(reason string) { } } +func (c *ImageCommitOpts) EnableForceCompression(reason string) { + if !c.RefCfg.Compression.Force { + message := "forcibly turning on force-compression mode" + if reason != "" { + message += " for " + reason + } + logrus.Warn(message) + + c.RefCfg.Compression.Force = true + } +} + func parseBool(dest *bool, key string, value string) error { b, err := strconv.ParseBool(value) if err != nil { diff --git a/exporter/containerimage/patch.go b/exporter/containerimage/patch.go new file mode 100644 index 000000000000..93866b018bda --- /dev/null +++ b/exporter/containerimage/patch.go @@ -0,0 +1,18 @@ +//go:build !nydus +// +build !nydus + +package containerimage + +import ( + "context" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +func patchImageLayers(ctx context.Context, remote *solver.Remote, history []ocispecs.History, ref cache.ImmutableRef, opts *ImageCommitOpts, sg session.Group) (*solver.Remote, []ocispecs.History, error) { + remote, history = normalizeLayersAndHistory(ctx, remote, history, ref, opts.OCITypes) + return remote, history, nil
+} diff --git a/exporter/containerimage/patch_nydus.go b/exporter/containerimage/patch_nydus.go new file mode 100644 index 000000000000..3a9336a66f64 --- /dev/null +++ b/exporter/containerimage/patch_nydus.go @@ -0,0 +1,35 @@ +//go:build nydus +// +build nydus + +package containerimage + +import ( + "context" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/compression" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// patchImageLayers appends an extra nydus bootstrap layer +// to the manifest of a nydus image and normalizes the layers +// and history. The nydus bootstrap layer represents the +// metadata of the filesystem view for the entire image. +func patchImageLayers(ctx context.Context, remote *solver.Remote, history []ocispecs.History, ref cache.ImmutableRef, opts *ImageCommitOpts, sg session.Group) (*solver.Remote, []ocispecs.History, error) { + if opts.RefCfg.Compression.Type != compression.Nydus { + remote, history = normalizeLayersAndHistory(ctx, remote, history, ref, opts.OCITypes) + return remote, history, nil + } + + desc, err := cache.MergeNydus(ctx, ref, opts.RefCfg.Compression, sg) + if err != nil { + return nil, nil, errors.Wrap(err, "merge nydus layer") + } + remote.Descriptors = append(remote.Descriptors, *desc) + + remote, history = normalizeLayersAndHistory(ctx, remote, history, ref, opts.OCITypes) + return remote, history, nil +} diff --git a/exporter/containerimage/writer.go b/exporter/containerimage/writer.go index 66aecacde580..bc5a11e32a51 100644 --- a/exporter/containerimage/writer.go +++ b/exporter/containerimage/writer.go @@ -95,7 +95,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session } } - mfstDesc, configDesc, err := ic.commitDistributionManifest(ctx, opts, inp.Ref, inp.Metadata[exptypes.ExporterImageConfigKey], &remotes[0], opts.Annotations.Platform(nil), inp.Metadata[exptypes.ExporterInlineCache], dtbi, opts.Epoch) + mfstDesc, configDesc, err := ic.commitDistributionManifest(ctx, opts, inp.Ref, inp.Metadata[exptypes.ExporterImageConfigKey], &remotes[0], opts.Annotations.Platform(nil), inp.Metadata[exptypes.ExporterInlineCache], dtbi, opts.Epoch, session.NewGroup(sessionID)) if err != nil { return nil, err } @@ -175,7 +175,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session } } - desc, _, err := ic.commitDistributionManifest(ctx, opts, r, config, &remotes[remotesMap[p.ID]], opts.Annotations.Platform(&p.Platform), inlineCache, dtbi, opts.Epoch) + desc, _, err := ic.commitDistributionManifest(ctx, opts, r, config, &remotes[remotesMap[p.ID]], opts.Annotations.Platform(&p.Platform), inlineCache, dtbi, opts.Epoch, session.NewGroup(sessionID)) if err != nil { return nil, err } @@ -412,7 +412,7 @@ func (ic *ImageWriter) extractAttestations(ctx context.Context, opts *ImageCommi return allStatements, nil } -func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *ImageCommitOpts, ref cache.ImmutableRef, config []byte, remote *solver.Remote, annotations *Annotations, inlineCache []byte, buildInfo []byte, epoch *time.Time) (*ocispecs.Descriptor, *ocispecs.Descriptor, error) { +func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *ImageCommitOpts, ref cache.ImmutableRef, config []byte, remote *solver.Remote, annotations *Annotations, inlineCache []byte, buildInfo []byte, epoch *time.Time, sg session.Group)
(*ocispecs.Descriptor, *ocispecs.Descriptor, error) { if len(config) == 0 { var err error config, err = defaultImageConfig() @@ -432,7 +432,10 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *Ima return nil, nil, err } - remote, history = normalizeLayersAndHistory(ctx, remote, history, ref, opts.OCITypes) + remote, history, err = patchImageLayers(ctx, remote, history, ref, opts, sg) + if err != nil { + return nil, nil, err + } config, err = patchImageConfig(config, remote.Descriptors, history, inlineCache, buildInfo, epoch) if err != nil { diff --git a/go.mod b/go.mod index 0009b6806845..5c25843a08fe 100644 --- a/go.mod +++ b/go.mod @@ -20,6 +20,7 @@ require ( github.com/containerd/fuse-overlayfs-snapshotter v1.0.2 github.com/containerd/go-cni v1.1.6 github.com/containerd/go-runc v1.0.0 + github.com/containerd/nydus-snapshotter v0.3.1 github.com/containerd/stargz-snapshotter v0.12.0 github.com/containerd/stargz-snapshotter/estargz v0.12.0 github.com/containerd/typeurl v1.0.2 diff --git a/go.sum b/go.sum index 40d1a536f56b..e42640136d06 100644 --- a/go.sum +++ b/go.sum @@ -400,6 +400,8 @@ github.com/containerd/imgcrypt v1.1.4/go.mod h1:LorQnPtzL/T0IyCeftcsMEO7AqxUDbdO github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nydus-snapshotter v0.3.1 h1:b8WahTrPkt3XsabjG2o/leN4fw3HWZYr+qxo/Z8Mfzk= +github.com/containerd/nydus-snapshotter v0.3.1/go.mod h1:+8R7NX7vrjlxAgtidnsstwIhpzyTlriYPssTxH++uiM= github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116/go.mod h1:o59b3PCKVAf9jjiKtCc/9hLAd+5p/rfhBfm6aBcTEr4= github.com/containerd/stargz-snapshotter v0.12.0 h1:SRKo+YxmypnlyC7eKc9KNW0Ciy1Auo102s8E/aRGWKg= github.com/containerd/stargz-snapshotter v0.12.0/go.mod h1:LYKrfzJBNUs0hP9GA4YXTIudE6pjZ96W0X1Ls7yI7Gs= diff --git a/hack/test b/hack/test index 7b6ea11a8d1f..aa8c572a989f 100755 --- a/hack/test +++ b/hack/test @@ -12,6 +12,7 @@ set -eu -o pipefail : ${TEST_KEEP_CACHE=} : ${DOCKERFILE_RELEASES=} : ${BUILDKIT_WORKER_RANDOM=} +: ${BUILDKITD_TAGS=} if [ "$TEST_DOCKERD" == "1" ]; then if [ ! -f "$TEST_DOCKERD_BINARY" ]; then @@ -60,6 +61,7 @@ if [ "$TEST_COVERAGE" = "1" ]; then fi buildxCmd build $cacheFromFlags \ + --build-arg "BUILDKITD_TAGS=$BUILDKITD_TAGS" \ --target "integration-tests" \ --output "type=docker,name=$iid" \ $currentcontext diff --git a/util/compression/compression.go b/util/compression/compression.go index 9559c95b6c10..6e6a885c768d 100644 --- a/util/compression/compression.go +++ b/util/compression/compression.go @@ -24,11 +24,12 @@ type Finalizer func(context.Context, content.Store) (map[string]string, error) // Type represents compression type for blob data, which needs // to be implemented for each compression type. 
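+// Compress receives a context so implementations can respect cancellation; +// NeedsForceCompression reports whether the type can only be exported with +// force-compression enabled.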
type Type interface { - Compress(comp Config) (compressorFunc Compressor, finalize Finalizer) + Compress(ctx context.Context, comp Config) (compressorFunc Compressor, finalize Finalizer) Decompress(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (io.ReadCloser, error) NeedsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (bool, error) NeedsComputeDiffBySelf() bool OnlySupportOCITypes() bool + NeedsForceCompression() bool MediaType() string String() string } @@ -83,7 +84,7 @@ const ( var Default gzipType = Gzip -func Parse(t string) (Type, error) { +func parse(t string) (Type, error) { switch t { case Uncompressed.String(): return Uncompressed, nil @@ -98,15 +99,7 @@ func Parse(t string) (Type, error) { } } -func IsMediaType(ct Type, mt string) bool { - mt, ok := toOCILayerType[mt] - if !ok { - return false - } - return mt == ct.MediaType() -} - -func FromMediaType(mediaType string) (Type, error) { +func fromMediaType(mediaType string) (Type, error) { switch toOCILayerType[mediaType] { case ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerNonDistributable: return Uncompressed, nil @@ -119,6 +112,14 @@ func FromMediaType(mediaType string) (Type, error) { } } +func IsMediaType(ct Type, mt string) bool { + mt, ok := toOCILayerType[mt] + if !ok { + return false + } + return mt == ct.MediaType() +} + // DetectLayerMediaType returns media type from existing blob data. func DetectLayerMediaType(ctx context.Context, cs content.Store, id digest.Digest, oci bool) (string, error) { ra, err := cs.ReaderAt(ctx, ocispecs.Descriptor{Digest: id}) diff --git a/util/compression/estargz.go b/util/compression/estargz.go index 358fc6b317ea..84d988b9b11f 100644 --- a/util/compression/estargz.go +++ b/util/compression/estargz.go @@ -24,7 +24,7 @@ var EStargzAnnotations = []string{estargz.TOCJSONDigestAnnotation, estargz.Store const containerdUncompressed = "containerd.io/uncompressed" const estargzLabel = "buildkit.io/compression/estargz" -func (c estargzType) Compress(comp Config) (compressorFunc Compressor, finalize Finalizer) { +func (c estargzType) Compress(ctx context.Context, comp Config) (compressorFunc Compressor, finalize Finalizer) { var cInfo *compressionInfo var writeErr error var mu sync.Mutex @@ -142,6 +142,10 @@ func (c estargzType) OnlySupportOCITypes() bool { return true } +func (c estargzType) NeedsForceCompression() bool { + return false +} + func (c estargzType) MediaType() string { return ocispecs.MediaTypeImageLayerGzip } diff --git a/util/compression/gzip.go b/util/compression/gzip.go index 081abef956c1..7120ba35e38d 100644 --- a/util/compression/gzip.go +++ b/util/compression/gzip.go @@ -10,7 +10,7 @@ import ( ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) -func (c gzipType) Compress(comp Config) (compressorFunc Compressor, finalize Finalizer) { +func (c gzipType) Compress(ctx context.Context, comp Config) (compressorFunc Compressor, finalize Finalizer) { return func(dest io.Writer, _ string) (io.WriteCloser, error) { return gzipWriter(comp)(dest) }, nil @@ -46,6 +46,10 @@ func (c gzipType) OnlySupportOCITypes() bool { return false } +func (c gzipType) NeedsForceCompression() bool { + return false +} + func (c gzipType) MediaType() string { return ocispecs.MediaTypeImageLayerGzip } diff --git a/util/compression/nydus.go b/util/compression/nydus.go new file mode 100644 index 000000000000..4e04be70b7ab --- /dev/null +++ b/util/compression/nydus.go @@ -0,0 +1,141 @@ +//go:build nydus +// +build nydus + +package compression + 
+import ( + "context" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + + nydusify "github.com/containerd/nydus-snapshotter/pkg/converter" +) + +type nydusType struct{} + +var Nydus = nydusType{} + +func init() { + toDockerLayerType[nydusify.MediaTypeNydusBlob] = nydusify.MediaTypeNydusBlob + toOCILayerType[nydusify.MediaTypeNydusBlob] = nydusify.MediaTypeNydusBlob +} + +func Parse(t string) (Type, error) { + ct, err := parse(t) + if err != nil && t == Nydus.String() { + return Nydus, nil + } + return ct, err +} + +func FromMediaType(mediaType string) (Type, error) { + ct, err := fromMediaType(mediaType) + if err != nil && mediaType == nydusify.MediaTypeNydusBlob { + return Nydus, nil + } + return ct, err +} + +func (c nydusType) Compress(ctx context.Context, comp Config) (compressorFunc Compressor, finalize Finalizer) { + digester := digest.Canonical.Digester() + return func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) { + writer := io.MultiWriter(dest, digester.Hash()) + return nydusify.Pack(ctx, writer, nydusify.PackOption{}) + }, func(ctx context.Context, cs content.Store) (map[string]string, error) { + // Fill necessary labels + uncompressedDgst := digester.Digest().String() + info, err := cs.Info(ctx, digester.Digest()) + if err != nil { + return nil, errors.Wrap(err, "get info from content store") + } + if info.Labels == nil { + info.Labels = make(map[string]string) + } + info.Labels[containerdUncompressed] = uncompressedDgst + if _, err := cs.Update(ctx, info, "labels."+containerdUncompressed); err != nil { + return nil, errors.Wrap(err, "update info to content store") + } + + // Fill annotations + annotations := map[string]string{ + containerdUncompressed: uncompressedDgst, + // Use this annotation to identify nydus blob layer. + nydusify.LayerAnnotationNydusBlob: "true", + } + return annotations, nil + } +} + +func (c nydusType) Decompress(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (io.ReadCloser, error) { + ra, err := cs.ReaderAt(ctx, desc) + if err != nil { + return nil, err + } + + pr, pw := io.Pipe() + + go func() { + defer pw.Close() + if err := nydusify.Unpack(ctx, ra, pw, nydusify.UnpackOption{}); err != nil { + pw.CloseWithError(errors.Wrap(err, "unpack nydus blob")) + } + }() + + return pr, nil +} + +func (c nydusType) NeedsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (bool, error) { + if !images.IsLayerType(desc.MediaType) { + return false, nil + } + + if isNydusBlob, err := c.Is(ctx, cs, desc); err != nil { + return true, nil + } else if isNydusBlob { + return false, nil + } + + return true, nil +} + +func (c nydusType) NeedsComputeDiffBySelf() bool { + return true +} + +func (c nydusType) OnlySupportOCITypes() bool { + return true +} + +func (c nydusType) NeedsForceCompression() bool { + return true +} + +func (c nydusType) MediaType() string { + return nydusify.MediaTypeNydusBlob +} + +func (c nydusType) String() string { + return "nydus" +} + +// Is returns true when the specified digest of content exists in +// the content store and it is in the nydus format.
+func (c nydusType) Is(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (bool, error) { + if desc.Annotations == nil { + return false, nil + } + hasMediaType := desc.MediaType == nydusify.MediaTypeNydusBlob + _, hasAnno := desc.Annotations[nydusify.LayerAnnotationNydusBlob] + + _, err := cs.Info(ctx, desc.Digest) + if err != nil { + return false, err + } + + return hasMediaType && hasAnno, nil +} diff --git a/util/compression/parse.go b/util/compression/parse.go new file mode 100644 index 000000000000..6567da4e877d --- /dev/null +++ b/util/compression/parse.go @@ -0,0 +1,12 @@ +//go:build !nydus +// +build !nydus + +package compression + +func Parse(t string) (Type, error) { + return parse(t) +} + +func FromMediaType(mediaType string) (Type, error) { + return fromMediaType(mediaType) +} diff --git a/util/compression/uncompressed.go b/util/compression/uncompressed.go index b2e7197c909c..5fc5b8e92a19 100644 --- a/util/compression/uncompressed.go +++ b/util/compression/uncompressed.go @@ -11,7 +11,7 @@ import ( ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) -func (c uncompressedType) Compress(comp Config) (compressorFunc Compressor, finalize Finalizer) { +func (c uncompressedType) Compress(ctx context.Context, comp Config) (compressorFunc Compressor, finalize Finalizer) { return func(dest io.Writer, mediaType string) (io.WriteCloser, error) { return &iohelper.NopWriteCloser{Writer: dest}, nil }, nil @@ -48,6 +48,10 @@ func (c uncompressedType) OnlySupportOCITypes() bool { return false } +func (c uncompressedType) NeedsForceCompression() bool { + return false +} + func (c uncompressedType) MediaType() string { return ocispecs.MediaTypeImageLayer } diff --git a/util/compression/zstd.go b/util/compression/zstd.go index 61313e3f51fe..f18872199f64 100644 --- a/util/compression/zstd.go +++ b/util/compression/zstd.go @@ -10,7 +10,7 @@ import ( ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) -func (c zstdType) Compress(comp Config) (compressorFunc Compressor, finalize Finalizer) { +func (c zstdType) Compress(ctx context.Context, comp Config) (compressorFunc Compressor, finalize Finalizer) { return func(dest io.Writer, _ string) (io.WriteCloser, error) { return zstdWriter(comp)(dest) }, nil @@ -42,6 +42,10 @@ func (c zstdType) OnlySupportOCITypes() bool { return false } +func (c zstdType) NeedsForceCompression() bool { + return false +} + func (c zstdType) MediaType() string { return mediaTypeImageLayerZstd } diff --git a/util/winlayers/applier.go b/util/winlayers/applier.go index d19025cbb7ee..f2b147d674af 100644 --- a/util/winlayers/applier.go +++ b/util/winlayers/applier.go @@ -38,7 +38,7 @@ type winApplier struct { func (s *winApplier) Apply(ctx context.Context, desc ocispecs.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispecs.Descriptor, err error) { if !hasWindowsLayerMode(ctx) { - return s.a.Apply(ctx, desc, mounts, opts...) + return s.apply(ctx, desc, mounts, opts...) 
} compressed, err := images.DiffCompression(ctx, desc.MediaType) diff --git a/util/winlayers/apply.go b/util/winlayers/apply.go new file mode 100644 index 000000000000..20b2faa03818 --- /dev/null +++ b/util/winlayers/apply.go @@ -0,0 +1,16 @@ +//go:build !nydus +// +build !nydus + +package winlayers + +import ( + "context" + + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/mount" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +func (s *winApplier) apply(ctx context.Context, desc ocispecs.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispecs.Descriptor, err error) { + return s.a.Apply(ctx, desc, mounts, opts...) +} diff --git a/util/winlayers/apply_nydus.go b/util/winlayers/apply_nydus.go new file mode 100644 index 000000000000..1ef61b5bca0a --- /dev/null +++ b/util/winlayers/apply_nydus.go @@ -0,0 +1,73 @@ +//go:build nydus +// +build nydus + +package winlayers + +import ( + "context" + "io" + + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/mount" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + + nydusify "github.com/containerd/nydus-snapshotter/pkg/converter" +) + +func isNydusBlob(ctx context.Context, desc ocispecs.Descriptor) bool { + if desc.Annotations == nil { + return false + } + + hasMediaType := desc.MediaType == nydusify.MediaTypeNydusBlob + _, hasAnno := desc.Annotations[nydusify.LayerAnnotationNydusBlob] + return hasMediaType && hasAnno +} + +func (s *winApplier) apply(ctx context.Context, desc ocispecs.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispecs.Descriptor, err error) { + if !isNydusBlob(ctx, desc) { + return s.a.Apply(ctx, desc, mounts, opts...) + } + + var ocidesc ocispecs.Descriptor + if err := mount.WithTempMount(ctx, mounts, func(root string) error { + ra, err := s.cs.ReaderAt(ctx, desc) + if err != nil { + return errors.Wrap(err, "get reader from content store") + } + defer ra.Close() + + pr, pw := io.Pipe() + go func() { + defer pw.Close() + if err := nydusify.Unpack(ctx, ra, pw, nydusify.UnpackOption{}); err != nil { + pw.CloseWithError(errors.Wrap(err, "unpack nydus blob")) + } + }() + defer pr.Close() + + digester := digest.Canonical.Digester() + rc := &readCounter{ + r: io.TeeReader(pr, digester.Hash()), + } + + if _, err := archive.Apply(ctx, root, rc); err != nil { + return errors.Wrap(err, "apply nydus blob") + } + + ocidesc = ocispecs.Descriptor{ + MediaType: ocispecs.MediaTypeImageLayer, + Size: rc.c, + Digest: digester.Digest(), + } + + return nil + }); err != nil { + return ocispecs.Descriptor{}, err + } + + return ocidesc, nil +} diff --git a/vendor/github.com/containerd/nydus-snapshotter/LICENSE b/vendor/github.com/containerd/nydus-snapshotter/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/constant.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/constant.go new file mode 100644 index 000000000000..b7b9f2a2b7bd --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/constant.go @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2022. Nydus Developers. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package converter + +const ( + ManifestOSFeatureNydus = "nydus.remoteimage.v1" + MediaTypeNydusBlob = "application/vnd.oci.image.layer.nydus.blob.v1" + BootstrapFileNameInLayer = "image/image.boot" + + ManifestNydusCache = "containerd.io/snapshot/nydus-cache" + + LayerAnnotationFSVersion = "containerd.io/snapshot/nydus-fs-version" + LayerAnnotationNydusBlob = "containerd.io/snapshot/nydus-blob" + LayerAnnotationNydusBlobDigest = "containerd.io/snapshot/nydus-blob-digest" + LayerAnnotationNydusBlobSize = "containerd.io/snapshot/nydus-blob-size" + LayerAnnotationNydusBlobIDs = "containerd.io/snapshot/nydus-blob-ids" + LayerAnnotationNydusBootstrap = "containerd.io/snapshot/nydus-bootstrap" + LayerAnnotationNydusSourceChainID = "containerd.io/snapshot/nydus-source-chainid" + + LayerAnnotationNydusReferenceBlobIDs = "containerd.io/snapshot/nydus-reference-blob-ids" + + LayerAnnotationUncompressed = "containerd.io/uncompressed" +) diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go new file mode 100644 index 000000000000..dc0130aefece --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go @@ -0,0 +1,839 @@ +//go:build !windows +// +build !windows + +/* + * Copyright (c) 2022. Nydus Developers. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +package converter + +import ( + "archive/tar" + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "syscall" + + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/images/converter" + "github.com/containerd/containerd/labels" + "github.com/containerd/fifo" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" + + "github.com/containerd/nydus-snapshotter/pkg/converter/tool" + "github.com/containerd/nydus-snapshotter/pkg/errdefs" +) + +const bootstrapNameInTar = "image.boot" +const blobNameInTar = "image.blob" + +const envNydusBuilder = "NYDUS_BUILDER" +const envNydusWorkDir = "NYDUS_WORKDIR" + +const configGCLabelKey = "containerd.io/gc.ref.content.config" + +var bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 1<<20) + return &buffer + }, +} + +func getBuilder(specifiedPath string) string { + if specifiedPath != "" { + return specifiedPath + } + + builderPath := os.Getenv(envNydusBuilder) + if builderPath != "" { + return builderPath + } + + return "nydus-image" +} + +func ensureWorkDir(specifiedBasePath string) (string, error) { + var baseWorkDir string + + if specifiedBasePath != "" { + baseWorkDir = specifiedBasePath + } else { + baseWorkDir = os.Getenv(envNydusWorkDir) + } + if baseWorkDir == "" { + baseWorkDir = os.TempDir() + } + + if err := os.MkdirAll(baseWorkDir, 0750); err != nil { + return "", errors.Wrapf(err, "create base directory %s", baseWorkDir) + } + + workDirPath, err := os.MkdirTemp(baseWorkDir, "nydus-converter-") + if err != nil { + return "", errors.Wrap(err, "create work directory") + } + + return workDirPath, nil +} + +// Unpack a OCI formatted tar stream into a directory. +func unpackOciTar(ctx context.Context, dst string, reader io.Reader) error { + ds, err := compression.DecompressStream(reader) + if err != nil { + return errors.Wrap(err, "unpack stream") + } + defer ds.Close() + + if _, err := archive.Apply( + ctx, + dst, + ds, + archive.WithConvertWhiteout(func(hdr *tar.Header, file string) (bool, error) { + // Keep to extract all whiteout files. + return true, nil + }), + ); err != nil { + return errors.Wrap(err, "apply with convert whiteout") + } + + return nil +} + +// Unpack a Nydus formatted tar stream into a directory. +func unpackNydusTar(ctx context.Context, bootDst, blobDst string, ra content.ReaderAt) error { + boot, err := os.OpenFile(bootDst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return errors.Wrapf(err, "write to bootstrap %s", bootDst) + } + defer boot.Close() + + if err = unpackBootstrapFromNydusTar(ctx, ra, boot); err != nil { + return errors.Wrap(err, "unpack bootstrap from nydus") + } + + blob, err := os.OpenFile(blobDst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return errors.Wrapf(err, "write to blob %s", blobDst) + } + defer blob.Close() + + if err = unpackBlobFromNydusTar(ctx, ra, blob); err != nil { + return errors.Wrap(err, "unpack blob from nydus") + } + + return nil +} + +// Unpack the bootstrap from nydus formatted tar stream (blob + bootstrap). 
+// Unpack the bootstrap from a nydus formatted tar stream (blob + bootstrap).
+// The nydus formatted tar stream is a tar-like structure that arranges the
+// data as follows:
+//
+// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header`
+func unpackBootstrapFromNydusTar(ctx context.Context, ra content.ReaderAt, target io.Writer) error {
+	cur := ra.Size()
+	reader := newSeekReader(ra)
+
+	const headerSize = 512
+
+	// Seek from tail to head of the nydus formatted tar stream to find the
+	// nydus bootstrap data.
+	for {
+		if headerSize > cur {
+			return fmt.Errorf("invalid tar format at pos %d", cur)
+		}
+
+		// Try to seek to the tar header.
+		var err error
+		cur, err = reader.Seek(cur-headerSize, io.SeekStart)
+		if err != nil {
+			return errors.Wrapf(err, "seek to %d for tar header", cur-headerSize)
+		}
+
+		tr := tar.NewReader(reader)
+		// Parse tar header.
+		hdr, err := tr.Next()
+		if err != nil {
+			return errors.Wrap(err, "parse tar header")
+		}
+
+		if hdr.Name == bootstrapNameInTar {
+			// Try to seek to the tar data (bootstrap_data).
+			if hdr.Size > cur {
+				return fmt.Errorf("invalid tar format at pos %d", cur)
+			}
+			bootstrapOffset := cur - hdr.Size
+			_, err = reader.Seek(bootstrapOffset, io.SeekStart)
+			if err != nil {
+				return errors.Wrap(err, "seek to bootstrap data offset")
+			}
+
+			// Copy tar data (bootstrap_data) to the provided target writer.
+			if _, err := io.CopyN(target, reader, hdr.Size); err != nil {
+				return errors.Wrap(err, "copy bootstrap data to target")
+			}
+
+			return nil
+		}
+
+		if cur == hdr.Size {
+			break
+		}
+	}
+
+	return fmt.Errorf("can't find bootstrap in nydus tar")
+}
+
+// Unpack the blob from a nydus formatted tar stream (blob + bootstrap).
+// The nydus formatted tar stream is a tar-like structure that arranges the
+// data as follows:
+//
+// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header`
+func unpackBlobFromNydusTar(ctx context.Context, ra content.ReaderAt, target io.Writer) error {
+	cur := ra.Size()
+	reader := newSeekReader(ra)
+
+	const headerSize = 512
+
+	// Seek from tail to head of the nydus formatted tar stream to find the
+	// nydus blob data.
+	for {
+		if headerSize > cur {
+			break
+		}
+
+		// Try to seek to the tar header.
+		var err error
+		cur, err = reader.Seek(cur-headerSize, io.SeekStart)
+		if err != nil {
+			return errors.Wrapf(err, "seek to %d for tar header", cur-headerSize)
+		}
+
+		tr := tar.NewReader(reader)
+		// Parse tar header.
+		hdr, err := tr.Next()
+		if err != nil {
+			return errors.Wrap(err, "parse tar header")
+		}
+
+		if hdr.Name == bootstrapNameInTar {
+			if hdr.Size > cur {
+				return fmt.Errorf("invalid tar format at pos %d", cur)
+			}
+			cur, err = reader.Seek(cur-hdr.Size, io.SeekStart)
+			if err != nil {
+				return errors.Wrap(err, "seek to bootstrap data offset")
+			}
+		} else if hdr.Name == blobNameInTar {
+			if hdr.Size > cur {
+				return fmt.Errorf("invalid tar format at pos %d", cur)
+			}
+			_, err = reader.Seek(cur-hdr.Size, io.SeekStart)
+			if err != nil {
+				return errors.Wrap(err, "seek to blob data offset")
+			}
+			if _, err := io.CopyN(target, reader, hdr.Size); err != nil {
+				return errors.Wrap(err, "copy blob data to target")
+			}
+			return nil
+		}
+	}
+
+	return nil
+}
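+// Both helpers above scan from the tail because the nydus formatted tar
+// places each entry's 512-byte header after its data:
+//
+//	| blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header |
+//	                                                ^ last 512 bytes
+//
+// Reading the trailing header first yields the bootstrap entry and its size,
+// which gives the offset of bootstrap_data; one more hop backwards locates
+// the blob entry the same way.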
+// Pack converts an OCI tar stream to a nydus formatted stream with a tar-like
+// structure that arranges the data as follows:
+//
+// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header`
+//
+// The caller should write the OCI tar stream into the returned `io.WriteCloser`,
+// and Pack will write the nydus formatted stream to the `dest` provided by the
+// caller.
+//
+// Important: the caller must check that `Close()` on the returned
+// `io.WriteCloser` returns nil to ensure the conversion workflow has finished.
+func Pack(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser, error) {
+	workDir, err := ensureWorkDir(opt.WorkDir)
+	if err != nil {
+		return nil, errors.Wrap(err, "ensure work directory")
+	}
+	defer func() {
+		if err != nil {
+			os.RemoveAll(workDir)
+		}
+	}()
+
+	sourceDir := filepath.Join(workDir, "source")
+	if err := os.MkdirAll(sourceDir, 0755); err != nil {
+		return nil, errors.Wrap(err, "create source directory")
+	}
+
+	pr, pw := io.Pipe()
+
+	unpackDone := make(chan bool, 1)
+	go func() {
+		if err := unpackOciTar(ctx, sourceDir, pr); err != nil {
+			pr.CloseWithError(errors.Wrapf(err, "unpack to %s", sourceDir))
+			close(unpackDone)
+			return
+		}
+		unpackDone <- true
+	}()
+
+	wc := newWriteCloser(pw, func() error {
+		defer func() {
+			os.RemoveAll(workDir)
+		}()
+
+		// Closing the returned PipeWriter does not mean that the PipeReader
+		// side has consumed all the data, so the unpack may not be complete
+		// yet; wait for it to finish here.
+		<-unpackDone
+
+		blobPath := filepath.Join(workDir, "blob")
+		blobFifo, err := fifo.OpenFifo(ctx, blobPath, syscall.O_CREAT|syscall.O_RDONLY|syscall.O_NONBLOCK, 0644)
+		if err != nil {
+			return errors.Wrapf(err, "create fifo file")
+		}
+		defer blobFifo.Close()
+
+		go func() {
+			err := tool.Pack(tool.PackOption{
+				BuilderPath: getBuilder(opt.BuilderPath),
+
+				BlobPath:         blobPath,
+				FsVersion:        opt.FsVersion,
+				SourcePath:       sourceDir,
+				ChunkDictPath:    opt.ChunkDictPath,
+				PrefetchPatterns: opt.PrefetchPatterns,
+				Compressor:       opt.Compressor,
+				Timeout:          opt.Timeout,
+			})
+			if err != nil {
+				pw.CloseWithError(errors.Wrapf(err, "convert blob for %s", sourceDir))
+				blobFifo.Close()
+			}
+		}()
+
+		buffer := bufPool.Get().(*[]byte)
+		defer bufPool.Put(buffer)
+		if _, err := io.CopyBuffer(dest, blobFifo, *buffer); err != nil {
+			return errors.Wrap(err, "pack nydus tar")
+		}
+
+		return nil
+	})
+
+	return wc, nil
+}
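+// Illustrative sketch (not upstream code; `ociTarReader` is hypothetical):
+// a typical Pack call pipes an OCI layer tar through the returned writer and
+// treats the error from Close as part of the conversion result:
+//
+//	var nydusTar bytes.Buffer
+//	wc, err := Pack(ctx, &nydusTar, PackOption{})
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := io.Copy(wc, ociTarReader); err != nil {
+//		return err
+//	}
+//	if err := wc.Close(); err != nil {
+//		return err // the nydus-image builder may fail asynchronously
+//	}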
+// Merge merges multiple nydus bootstraps (one from each layer of the image)
+// into a final bootstrap. Because enabling the `ChunkDictPath` option may
+// deduplicate chunk data across layers, it returns the blob digests actually
+// referenced by the final bootstrap.
+func Merge(ctx context.Context, layers []Layer, dest io.Writer, opt MergeOption) ([]digest.Digest, error) {
+	workDir, err := ensureWorkDir(opt.WorkDir)
+	if err != nil {
+		return nil, errors.Wrap(err, "ensure work directory")
+	}
+	defer os.RemoveAll(workDir)
+
+	eg, ctx := errgroup.WithContext(ctx)
+	sourceBootstrapPaths := []string{}
+	for idx := range layers {
+		sourceBootstrapPaths = append(sourceBootstrapPaths, filepath.Join(workDir, layers[idx].Digest.Hex()))
+		eg.Go(func(idx int) func() error {
+			return func() error {
+				layer := layers[idx]
+
+				// Use the hex hash string of the whole tar blob as the bootstrap name.
+				bootstrap, err := os.Create(filepath.Join(workDir, layer.Digest.Hex()))
+				if err != nil {
+					return errors.Wrap(err, "create source bootstrap")
+				}
+				defer bootstrap.Close()
+
+				if err := unpackBootstrapFromNydusTar(ctx, layer.ReaderAt, bootstrap); err != nil {
+					return errors.Wrap(err, "unpack nydus tar")
+				}
+
+				return nil
+			}
+		}(idx))
+	}
+
+	if err := eg.Wait(); err != nil {
+		return nil, errors.Wrap(err, "unpack all bootstraps")
+	}
+
+	targetBootstrapPath := filepath.Join(workDir, "bootstrap")
+
+	blobDigests, err := tool.Merge(tool.MergeOption{
+		BuilderPath: getBuilder(opt.BuilderPath),
+
+		SourceBootstrapPaths: sourceBootstrapPaths,
+		TargetBootstrapPath:  targetBootstrapPath,
+		ChunkDictPath:        opt.ChunkDictPath,
+		PrefetchPatterns:     opt.PrefetchPatterns,
+		OutputJSONPath:       filepath.Join(workDir, "merge-output.json"),
+		Timeout:              opt.Timeout,
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "merge bootstrap")
+	}
+
+	var rc io.ReadCloser
+
+	if opt.WithTar {
+		rc, err = packToTar(targetBootstrapPath, fmt.Sprintf("image/%s", bootstrapNameInTar), false)
+		if err != nil {
+			return nil, errors.Wrap(err, "pack bootstrap to tar")
+		}
+	} else {
+		rc, err = os.Open(targetBootstrapPath)
+		if err != nil {
+			return nil, errors.Wrap(err, "open target bootstrap")
+		}
+	}
+	defer rc.Close()
+
+	buffer := bufPool.Get().(*[]byte)
+	defer bufPool.Put(buffer)
+	if _, err = io.CopyBuffer(dest, rc, *buffer); err != nil {
+		return nil, errors.Wrap(err, "copy merged bootstrap")
+	}
+
+	return blobDigests, nil
+}
+
+// Unpack converts a nydus blob layer back to an OCI formatted tar stream.
+func Unpack(ctx context.Context, ra content.ReaderAt, dest io.Writer, opt UnpackOption) error {
+	workDir, err := ensureWorkDir(opt.WorkDir)
+	if err != nil {
+		return errors.Wrap(err, "ensure work directory")
+	}
+	defer os.RemoveAll(workDir)
+
+	bootPath, blobPath := filepath.Join(workDir, bootstrapNameInTar), filepath.Join(workDir, blobNameInTar)
+	if err = unpackNydusTar(ctx, bootPath, blobPath, ra); err != nil {
+		return errors.Wrap(err, "unpack nydus tar")
+	}
+
+	tarPath := filepath.Join(workDir, "oci.tar")
+	blobFifo, err := fifo.OpenFifo(ctx, tarPath, syscall.O_CREAT|syscall.O_RDONLY|syscall.O_NONBLOCK, 0644)
+	if err != nil {
+		return errors.Wrapf(err, "create fifo file")
+	}
+	defer blobFifo.Close()
+
+	unpackErrChan := make(chan error)
+	go func() {
+		defer close(unpackErrChan)
+		err := tool.Unpack(tool.UnpackOption{
+			BuilderPath:   getBuilder(opt.BuilderPath),
+			BootstrapPath: bootPath,
+			BlobPath:      blobPath,
+			TarPath:       tarPath,
+			Timeout:       opt.Timeout,
+		})
+		if err != nil {
+			blobFifo.Close()
+			unpackErrChan <- err
+		}
+	}()
+
+	buffer := bufPool.Get().(*[]byte)
+	defer bufPool.Put(buffer)
+	if _, err := io.CopyBuffer(dest, blobFifo, *buffer); err != nil {
+		if unpackErr := <-unpackErrChan; unpackErr != nil {
+			return errors.Wrap(unpackErr, "unpack")
+		}
+		return errors.Wrap(err, "copy oci tar")
+	}
+
+	return nil
+}
+
+// IsNydusBlobAndExists returns true when the content with the specified digest
+// exists in the content store and is in nydus blob format.
+func IsNydusBlobAndExists(ctx context.Context, cs content.Store, desc ocispec.Descriptor) bool {
+	_, err := cs.Info(ctx, desc.Digest)
+	if err != nil {
+		return false
+	}
+
+	return IsNydusBlob(ctx, desc)
+}
+
+// IsNydusBlob returns true when the specified descriptor is in nydus blob format.
+func IsNydusBlob(ctx context.Context, desc ocispec.Descriptor) bool { + if desc.Annotations == nil { + return false + } + + _, hasAnno := desc.Annotations[LayerAnnotationNydusBlob] + return hasAnno +} + +// LayerConvertFunc returns a function which converts an OCI image layer to +// a nydus blob layer, and set the media type to "application/vnd.oci.image.layer.nydus.blob.v1". +func LayerConvertFunc(opt PackOption) converter.ConvertFunc { + return func(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) { + if !images.IsLayerType(desc.MediaType) { + return nil, nil + } + + ra, err := cs.ReaderAt(ctx, desc) + if err != nil { + return nil, errors.Wrap(err, "get source blob reader") + } + defer ra.Close() + rdr := io.NewSectionReader(ra, 0, ra.Size()) + + ref := fmt.Sprintf("convert-nydus-from-%s", desc.Digest) + dst, err := content.OpenWriter(ctx, cs, content.WithRef(ref)) + if err != nil { + return nil, errors.Wrap(err, "open blob writer") + } + defer dst.Close() + + tr, err := compression.DecompressStream(rdr) + if err != nil { + return nil, errors.Wrap(err, "decompress blob stream") + } + + digester := digest.SHA256.Digester() + pr, pw := io.Pipe() + tw, err := Pack(ctx, io.MultiWriter(pw, digester.Hash()), opt) + if err != nil { + return nil, errors.Wrap(err, "pack tar to nydus") + } + + go func() { + defer pw.Close() + buffer := bufPool.Get().(*[]byte) + defer bufPool.Put(buffer) + if _, err := io.CopyBuffer(tw, tr, *buffer); err != nil { + pw.CloseWithError(err) + return + } + if err := tr.Close(); err != nil { + pw.CloseWithError(err) + return + } + if err := tw.Close(); err != nil { + pw.CloseWithError(err) + return + } + }() + + if err := content.Copy(ctx, dst, pr, 0, ""); err != nil { + return nil, errors.Wrap(err, "copy nydus blob to content store") + } + + blobDigest := digester.Digest() + info, err := cs.Info(ctx, blobDigest) + if err != nil { + return nil, errors.Wrapf(err, "get blob info %s", blobDigest) + } + if info.Labels == nil { + info.Labels = map[string]string{} + } + // Write a diff id label of layer in content store for simplifying + // diff id calculation to speed up the conversion. + // See: https://github.com/containerd/containerd/blob/e4fefea5544d259177abb85b64e428702ac49c97/images/diffid.go#L49 + info.Labels[labels.LabelUncompressed] = blobDigest.String() + _, err = cs.Update(ctx, info) + if err != nil { + return nil, errors.Wrap(err, "update layer label") + } + + newDesc := ocispec.Descriptor{ + Digest: blobDigest, + Size: info.Size, + MediaType: MediaTypeNydusBlob, + Annotations: map[string]string{ + // Use `containerd.io/uncompressed` to generate DiffID of + // layer defined in OCI spec. + LayerAnnotationUncompressed: blobDigest.String(), + LayerAnnotationNydusBlob: "true", + }, + } + + if opt.Backend != nil { + blobRa, err := cs.ReaderAt(ctx, newDesc) + if err != nil { + return nil, errors.Wrap(err, "get nydus blob reader") + } + defer blobRa.Close() + + if err := opt.Backend.Push(ctx, blobRa, blobDigest); err != nil { + return nil, errors.Wrap(err, "push to storage backend") + } + } + + return &newDesc, nil + } +} + +// ConvertHookFunc returns a function which will be used as a callback +// called for each blob after conversion is done. The function only hooks +// the index conversion and the manifest conversion. 
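+// Callers typically register it as a PostConvertHook alongside
+// LayerConvertFunc via containerd's images/converter package (assumed API,
+// not part of this patch); a fuller wiring sketch appears at the end of
+// this patch.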
+func ConvertHookFunc(opt MergeOption) converter.ConvertHookFunc { + return func(ctx context.Context, cs content.Store, orgDesc ocispec.Descriptor, newDesc *ocispec.Descriptor) (*ocispec.Descriptor, error) { + switch { + case images.IsIndexType(newDesc.MediaType): + return convertIndex(ctx, cs, orgDesc, newDesc) + case images.IsManifestType(newDesc.MediaType): + return convertManifest(ctx, cs, newDesc, opt) + default: + return newDesc, nil + } + } +} + +// convertIndex modifies the original index by appending "nydus.remoteimage.v1" +// to the Platform.OSFeatures of each modified manifest descriptors. +func convertIndex(ctx context.Context, cs content.Store, orgDesc ocispec.Descriptor, newDesc *ocispec.Descriptor) (*ocispec.Descriptor, error) { + var orgIndex ocispec.Index + if _, err := readJSON(ctx, cs, &orgIndex, orgDesc); err != nil { + return nil, errors.Wrap(err, "read target image index json") + } + // isManifestModified is a function to check whether the manifest is modified. + isManifestModified := func(manifest ocispec.Descriptor) bool { + for _, oldManifest := range orgIndex.Manifests { + if manifest.Digest == oldManifest.Digest { + return false + } + } + return true + } + + var index ocispec.Index + indexLabels, err := readJSON(ctx, cs, &index, *newDesc) + if err != nil { + return nil, errors.Wrap(err, "read index json") + } + for i, manifest := range index.Manifests { + if !isManifestModified(manifest) { + // Skip the manifest which is not modified. + continue + } + manifest.Platform.OSFeatures = append(manifest.Platform.OSFeatures, ManifestOSFeatureNydus) + index.Manifests[i] = manifest + } + // Update image index in content store. + newIndexDesc, err := writeJSON(ctx, cs, index, *newDesc, indexLabels) + if err != nil { + return nil, errors.Wrap(err, "write index json") + } + return newIndexDesc, nil +} + +// convertManifest merges all the nydus blob layers into a +// nydus bootstrap layer, update the image config, +// and modify the image manifest. +func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Descriptor, opt MergeOption) (*ocispec.Descriptor, error) { + var manifest ocispec.Manifest + manifestDesc := *newDesc + manifestLabels, err := readJSON(ctx, cs, &manifest, manifestDesc) + if err != nil { + return nil, errors.Wrap(err, "read manifest json") + } + + // Append bootstrap layer to manifest. + bootstrapDesc, blobDescs, err := MergeLayers(ctx, cs, manifest.Layers, MergeOption{ + BuilderPath: opt.BuilderPath, + WorkDir: opt.WorkDir, + ChunkDictPath: opt.ChunkDictPath, + FsVersion: opt.FsVersion, + WithTar: true, + }) + if err != nil { + return nil, errors.Wrap(err, "merge nydus layers") + } + if opt.Backend != nil { + // Only append nydus bootstrap layer into manifest, and do not put nydus + // blob layer into manifest if blob storage backend is specified. + manifest.Layers = []ocispec.Descriptor{*bootstrapDesc} + } else { + for idx, blobDesc := range blobDescs { + blobGCLabelKey := fmt.Sprintf("containerd.io/gc.ref.content.l.%d", idx) + manifestLabels[blobGCLabelKey] = blobDesc.Digest.String() + } + // Affected by chunk dict, the blob list referenced by final bootstrap + // are from different layers, part of them are from original layers, part + // from chunk dict bootstrap, so we need to rewrite manifest's layers here. 
+ manifest.Layers = append(blobDescs, *bootstrapDesc) + } + + // Update the gc label of bootstrap layer + bootstrapGCLabelKey := fmt.Sprintf("containerd.io/gc.ref.content.l.%d", len(manifest.Layers)-1) + manifestLabels[bootstrapGCLabelKey] = bootstrapDesc.Digest.String() + + // Rewrite diff ids and remove useless annotation. + var config ocispec.Image + configLabels, err := readJSON(ctx, cs, &config, manifest.Config) + if err != nil { + return nil, errors.Wrap(err, "read image config") + } + if opt.Backend != nil { + config.RootFS.DiffIDs = []digest.Digest{digest.Digest(bootstrapDesc.Annotations[LayerAnnotationUncompressed])} + } else { + config.RootFS.DiffIDs = make([]digest.Digest, 0, len(manifest.Layers)) + for i, layer := range manifest.Layers { + config.RootFS.DiffIDs = append(config.RootFS.DiffIDs, digest.Digest(layer.Annotations[LayerAnnotationUncompressed])) + // Remove useless annotation. + delete(manifest.Layers[i].Annotations, LayerAnnotationUncompressed) + } + } + // Update image config in content store. + newConfigDesc, err := writeJSON(ctx, cs, config, manifest.Config, configLabels) + if err != nil { + return nil, errors.Wrap(err, "write image config") + } + manifest.Config = *newConfigDesc + // Update the config gc label + manifestLabels[configGCLabelKey] = newConfigDesc.Digest.String() + + // Update image manifest in content store. + newManifestDesc, err := writeJSON(ctx, cs, manifest, manifestDesc, manifestLabels) + if err != nil { + return nil, errors.Wrap(err, "write manifest") + } + + return newManifestDesc, nil +} + +// MergeLayers merges a list of nydus blob layer into a nydus bootstrap layer. +// The media type of the nydus bootstrap layer is "application/vnd.oci.image.layer.v1.tar+gzip". +func MergeLayers(ctx context.Context, cs content.Store, descs []ocispec.Descriptor, opt MergeOption) (*ocispec.Descriptor, []ocispec.Descriptor, error) { + // Extracts nydus bootstrap from nydus format for each layer. + layers := []Layer{} + + var chainID digest.Digest + for _, blobDesc := range descs { + ra, err := cs.ReaderAt(ctx, blobDesc) + if err != nil { + return nil, nil, errors.Wrapf(err, "get reader for blob %q", blobDesc.Digest) + } + defer ra.Close() + layers = append(layers, Layer{ + Digest: blobDesc.Digest, + ReaderAt: ra, + }) + if chainID == "" { + chainID = identity.ChainID([]digest.Digest{blobDesc.Digest}) + } else { + chainID = identity.ChainID([]digest.Digest{chainID, blobDesc.Digest}) + } + } + + // Merge all nydus bootstraps into a final nydus bootstrap. + pr, pw := io.Pipe() + blobDigestChan := make(chan []digest.Digest, 1) + go func() { + defer pw.Close() + blobDigests, err := Merge(ctx, layers, pw, opt) + if err != nil { + pw.CloseWithError(errors.Wrapf(err, "merge nydus bootstrap")) + } + blobDigestChan <- blobDigests + }() + + // Compress final nydus bootstrap to tar.gz and write into content store. 
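+	// Note: two digests are tracked below on purpose. The content store
+	// commit is keyed by the compressed (gzip) digest, while the uncompressed
+	// digest is recorded under the "containerd.io/uncompressed" label and
+	// annotation so the OCI DiffID can be derived later without decompressing
+	// the bootstrap layer again.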
+ cw, err := content.OpenWriter(ctx, cs, content.WithRef("nydus-merge-"+chainID.String())) + if err != nil { + return nil, nil, errors.Wrap(err, "open content store writer") + } + defer cw.Close() + + gw := gzip.NewWriter(cw) + uncompressedDgst := digest.SHA256.Digester() + compressed := io.MultiWriter(gw, uncompressedDgst.Hash()) + buffer := bufPool.Get().(*[]byte) + defer bufPool.Put(buffer) + if _, err := io.CopyBuffer(compressed, pr, *buffer); err != nil { + return nil, nil, errors.Wrapf(err, "copy bootstrap targz into content store") + } + if err := gw.Close(); err != nil { + return nil, nil, errors.Wrap(err, "close gzip writer") + } + + compressedDgst := cw.Digest() + if err := cw.Commit(ctx, 0, compressedDgst, content.WithLabels(map[string]string{ + LayerAnnotationUncompressed: uncompressedDgst.Digest().String(), + })); err != nil { + if !errdefs.IsAlreadyExists(err) { + return nil, nil, errors.Wrap(err, "commit to content store") + } + } + if err := cw.Close(); err != nil { + return nil, nil, errors.Wrap(err, "close content store writer") + } + + bootstrapInfo, err := cs.Info(ctx, compressedDgst) + if err != nil { + return nil, nil, errors.Wrap(err, "get info from content store") + } + + blobDigests := <-blobDigestChan + blobDescs := []ocispec.Descriptor{} + blobIDs := []string{} + for _, blobDigest := range blobDigests { + blobInfo, err := cs.Info(ctx, blobDigest) + if err != nil { + return nil, nil, errors.Wrap(err, "get info from content store") + } + blobDesc := ocispec.Descriptor{ + Digest: blobDigest, + Size: blobInfo.Size, + MediaType: MediaTypeNydusBlob, + Annotations: map[string]string{ + LayerAnnotationUncompressed: blobDigest.String(), + LayerAnnotationNydusBlob: "true", + }, + } + blobDescs = append(blobDescs, blobDesc) + blobIDs = append(blobIDs, blobDigest.Hex()) + } + + blobIDsBytes, err := json.Marshal(blobIDs) + if err != nil { + return nil, nil, errors.Wrap(err, "marshal blob ids") + } + + if opt.FsVersion == "" { + opt.FsVersion = "5" + } + + bootstrapDesc := ocispec.Descriptor{ + Digest: compressedDgst, + Size: bootstrapInfo.Size, + MediaType: ocispec.MediaTypeImageLayerGzip, + Annotations: map[string]string{ + LayerAnnotationUncompressed: uncompressedDgst.Digest().String(), + LayerAnnotationFSVersion: opt.FsVersion, + // Use this annotation to identify nydus bootstrap layer. + LayerAnnotationNydusBootstrap: "true", + // Track all blob digests for nydus snapshotter. + LayerAnnotationNydusBlobIDs: string(blobIDsBytes), + }, + } + + return &bootstrapDesc, blobDescs, nil +} diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_windows.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_windows.go new file mode 100644 index 000000000000..12cb53ed5373 --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_windows.go @@ -0,0 +1,51 @@ +//go:build windows +// +build windows + +/* + * Copyright (c) 2022. Nydus Developers. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +package converter + +import ( + "context" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images/converter" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +func Pack(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser, error) { + panic("not implemented") +} + +func Merge(ctx context.Context, layers []Layer, dest io.Writer, opt MergeOption) error { + panic("not implemented") +} + +func Unpack(ctx context.Context, ia content.ReaderAt, dest io.Writer, opt UnpackOption) error { + panic("not implemented") +} + +func IsNydusBlobAndExists(ctx context.Context, cs content.Store, desc ocispec.Descriptor) bool { + panic("not implemented") +} + +func IsNydusBlob(ctx context.Context, desc ocispec.Descriptor) bool { + panic("not implemented") +} + +func LayerConvertFunc(opt PackOption) converter.ConvertFunc { + panic("not implemented") +} + +func ConvertHookFunc(opt MergeOption) converter.ConvertHookFunc { + panic("not implemented") +} + +func MergeLayers(ctx context.Context, cs content.Store, descs []ocispec.Descriptor, opt MergeOption) (*ocispec.Descriptor, error) { + panic("not implemented") +} diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go new file mode 100644 index 000000000000..55e98cc09704 --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go @@ -0,0 +1,217 @@ +/* + * Copyright (c) 2022. Nydus Developers. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package tool + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "os/exec" + "strings" + "time" + + "github.com/containerd/nydus-snapshotter/pkg/errdefs" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var logger = logrus.WithField("module", "builder") + +type PackOption struct { + BuilderPath string + + BootstrapPath string + BlobPath string + FsVersion string + SourcePath string + ChunkDictPath string + PrefetchPatterns string + Compressor string + Timeout *time.Duration +} + +type MergeOption struct { + BuilderPath string + + SourceBootstrapPaths []string + TargetBootstrapPath string + ChunkDictPath string + PrefetchPatterns string + OutputJSONPath string + Timeout *time.Duration +} + +type UnpackOption struct { + BuilderPath string + BootstrapPath string + BlobPath string + TarPath string + Timeout *time.Duration +} + +type outputJSON struct { + Blobs []string +} + +func Pack(option PackOption) error { + if option.FsVersion == "" { + option.FsVersion = "5" + } + + args := []string{ + "create", + "--log-level", + "warn", + "--prefetch-policy", + "fs", + "--blob", + option.BlobPath, + "--source-type", + "directory", + "--whiteout-spec", + "none", + "--fs-version", + option.FsVersion, + "--inline-bootstrap", + } + if option.ChunkDictPath != "" { + args = append(args, "--chunk-dict", fmt.Sprintf("bootstrap=%s", option.ChunkDictPath)) + } + if option.PrefetchPatterns == "" { + option.PrefetchPatterns = "/" + } + if option.Compressor != "" { + args = append(args, "--compressor", option.Compressor) + } + args = append(args, option.SourcePath) + + ctx := context.Background() + var cancel context.CancelFunc + if option.Timeout != nil { + ctx, cancel = context.WithTimeout(ctx, *option.Timeout) + defer cancel() + } + + logrus.Debugf("\tCommand: %s %s", option.BuilderPath, 
strings.Join(args[:], " ")) + + cmd := exec.CommandContext(ctx, option.BuilderPath, args...) + cmd.Stdout = logger.Writer() + cmd.Stderr = logger.Writer() + cmd.Stdin = strings.NewReader(option.PrefetchPatterns) + + if err := cmd.Run(); err != nil { + if errdefs.IsSignalKilled(err) && option.Timeout != nil { + logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout) + } else { + logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args) + } + return err + } + + return nil +} + +func Merge(option MergeOption) ([]digest.Digest, error) { + args := []string{ + "merge", + "--log-level", + "warn", + "--prefetch-policy", + "fs", + "--output-json", + option.OutputJSONPath, + "--bootstrap", + option.TargetBootstrapPath, + } + if option.ChunkDictPath != "" { + args = append(args, "--chunk-dict", fmt.Sprintf("bootstrap=%s", option.ChunkDictPath)) + } + if option.PrefetchPatterns == "" { + option.PrefetchPatterns = "/" + } + args = append(args, option.SourceBootstrapPaths...) + + ctx := context.Background() + var cancel context.CancelFunc + if option.Timeout != nil { + ctx, cancel = context.WithTimeout(ctx, *option.Timeout) + defer cancel() + } + logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args[:], " ")) + + cmd := exec.CommandContext(ctx, option.BuilderPath, args...) + cmd.Stdout = logger.Writer() + cmd.Stderr = logger.Writer() + cmd.Stdin = strings.NewReader(option.PrefetchPatterns) + + if err := cmd.Run(); err != nil { + if errdefs.IsSignalKilled(err) && option.Timeout != nil { + logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout) + } else { + logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args) + } + return nil, errors.Wrap(err, "run merge command") + } + + outputBytes, err := ioutil.ReadFile(option.OutputJSONPath) + if err != nil { + return nil, errors.Wrapf(err, "read file %s", option.OutputJSONPath) + } + var output outputJSON + err = json.Unmarshal(outputBytes, &output) + if err != nil { + return nil, errors.Wrapf(err, "unmarshal output json file %s", option.OutputJSONPath) + } + + blobDigests := []digest.Digest{} + for _, blobID := range output.Blobs { + blobDigests = append(blobDigests, digest.NewDigestFromHex(string(digest.SHA256), blobID)) + } + + return blobDigests, nil +} + +func Unpack(option UnpackOption) error { + args := []string{ + "unpack", + "--log-level", + "warn", + "--bootstrap", + option.BootstrapPath, + "--output", + option.TarPath, + } + if option.BlobPath != "" { + args = append(args, "--blob", option.BlobPath) + } + + ctx := context.Background() + var cancel context.CancelFunc + if option.Timeout != nil { + ctx, cancel = context.WithTimeout(ctx, *option.Timeout) + defer cancel() + } + + logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args[:], " ")) + + cmd := exec.CommandContext(ctx, option.BuilderPath, args...) 
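+	// Note: if the deadline set via option.Timeout expires, exec.CommandContext
+	// kills the builder with SIGKILL and Run returns an error containing
+	// "signal: killed"; the errdefs.IsSignalKilled branch below matches exactly
+	// that string to report a likely timeout.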
+ cmd.Stdout = logger.Writer() + cmd.Stderr = logger.Writer() + + if err := cmd.Run(); err != nil { + if errdefs.IsSignalKilled(err) && option.Timeout != nil { + logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout) + } else { + logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args) + } + return err + } + + return nil +} diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go new file mode 100644 index 000000000000..9d0590a0c964 --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2022. Nydus Developers. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package converter + +import ( + "context" + "time" + + "github.com/containerd/containerd/content" + "github.com/opencontainers/go-digest" +) + +type Layer struct { + // Digest represents the hash of whole tar blob. + Digest digest.Digest + // ReaderAt holds the reader of whole tar blob. + ReaderAt content.ReaderAt +} + +// Backend uploads blobs generated by nydus-image builder to a backend storage such as: +// - oss: A object storage backend, which uses its SDK to upload blob file. +type Backend interface { + // Push pushes specified blob file to remote storage backend. + Push(ctx context.Context, ra content.ReaderAt, blobDigest digest.Digest) error + // Check checks whether a blob exists in remote storage backend, + // blob exists -> return (blobPath, nil) + // blob not exists -> return ("", err) + Check(blobDigest digest.Digest) (string, error) + // Type returns backend type name. + Type() string +} + +type PackOption struct { + // WorkDir is used as the work directory during layer pack. + WorkDir string + // BuilderPath holds the path of `nydus-image` binary tool. + BuilderPath string + // FsVersion specifies nydus RAFS format version, possible + // values: `5`, `6` (EROFS-compatible), default is `5`. + FsVersion string + // ChunkDictPath holds the bootstrap path of chunk dict image. + ChunkDictPath string + // PrefetchPatterns holds file path pattern list want to prefetch. + PrefetchPatterns string + // Compressor specifies nydus blob compression algorithm. + Compressor string + // Backend uploads blobs generated by nydus-image builder to a backend storage. + Backend Backend + // Timeout cancels execution once exceed the specified time. + Timeout *time.Duration +} + +type MergeOption struct { + // WorkDir is used as the work directory during layer merge. + WorkDir string + // BuilderPath holds the path of `nydus-image` binary tool. + BuilderPath string + // FsVersion specifies nydus RAFS format version, possible + // values: `5`, `6` (EROFS-compatible), default is `5`. + FsVersion string + // ChunkDictPath holds the bootstrap path of chunk dict image. + ChunkDictPath string + // PrefetchPatterns holds file path pattern list want to prefetch. + PrefetchPatterns string + // WithTar puts bootstrap into a tar stream (no gzip). + WithTar bool + // Backend uploads blobs generated by nydus-image builder to a backend storage. + Backend Backend + // Timeout cancels execution once exceed the specified time. + Timeout *time.Duration +} + +type UnpackOption struct { + // WorkDir is used as the work directory during layer unpack. + WorkDir string + // BuilderPath holds the path of `nydus-image` binary tool. 
+ BuilderPath string + // Timeout cancels execution once exceed the specified time. + Timeout *time.Duration +} diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go new file mode 100644 index 000000000000..849d870b3409 --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2022. Nydus Developers. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package converter + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/containerd/containerd/content" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type writeCloser struct { + closed bool + io.WriteCloser + action func() error +} + +func (c *writeCloser) Close() error { + if c.closed { + return nil + } + + if err := c.WriteCloser.Close(); err != nil { + return err + } + c.closed = true + + if err := c.action(); err != nil { + return err + } + + return nil +} + +func newWriteCloser(wc io.WriteCloser, action func() error) *writeCloser { + return &writeCloser{ + WriteCloser: wc, + action: action, + } +} + +type seekReader struct { + io.ReaderAt + pos int64 +} + +func (ra *seekReader) Read(p []byte) (int, error) { + n, err := ra.ReaderAt.ReadAt(p, ra.pos) + ra.pos += int64(len(p)) + return n, err +} + +func (ra *seekReader) Seek(offset int64, whence int) (int64, error) { + if whence == io.SeekCurrent { + ra.pos += offset + } else if whence == io.SeekStart { + ra.pos = offset + } else { + return 0, fmt.Errorf("unsupported whence %d", whence) + } + return ra.pos, nil +} + +func newSeekReader(ra io.ReaderAt) *seekReader { + return &seekReader{ + ReaderAt: ra, + pos: 0, + } +} + +// packToTar makes .tar(.gz) stream of file named `name` and return reader. +func packToTar(src string, name string, compress bool) (io.ReadCloser, error) { + fi, err := os.Stat(src) + if err != nil { + return nil, err + } + + dirHdr := &tar.Header{ + Name: filepath.Dir(name), + Mode: 0755, + Typeflag: tar.TypeDir, + } + + hdr := &tar.Header{ + Name: name, + Mode: 0444, + Size: fi.Size(), + } + + reader, writer := io.Pipe() + + go func() { + // Prepare targz writer + var tw *tar.Writer + var gw *gzip.Writer + var err error + var file *os.File + + if compress { + gw = gzip.NewWriter(writer) + tw = tar.NewWriter(gw) + } else { + tw = tar.NewWriter(writer) + } + + defer func() { + err1 := tw.Close() + var err2 error + if gw != nil { + err2 = gw.Close() + } + + var finalErr error + + // Return the first error encountered to the other end and ignore others. + if err != nil { + finalErr = err + } else if err1 != nil { + finalErr = err1 + } else if err2 != nil { + finalErr = err2 + } + + writer.CloseWithError(finalErr) + }() + + file, err = os.Open(src) + if err != nil { + return + } + defer file.Close() + + // Write targz stream + if err = tw.WriteHeader(dirHdr); err != nil { + return + } + + if err = tw.WriteHeader(hdr); err != nil { + return + } + + if _, err = io.Copy(tw, file); err != nil { + return + } + }() + + return reader, nil +} + +// Copied from containerd/containerd project, copyright The containerd Authors. 
+// https://github.com/containerd/containerd/blob/4902059cb554f4f06a8d06a12134c17117809f4e/images/converter/default.go#L385 +func readJSON(ctx context.Context, cs content.Store, x interface{}, desc ocispec.Descriptor) (map[string]string, error) { + info, err := cs.Info(ctx, desc.Digest) + if err != nil { + return nil, err + } + labels := info.Labels + b, err := content.ReadBlob(ctx, cs, desc) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, x); err != nil { + return nil, err + } + return labels, nil +} + +// Copied from containerd/containerd project, copyright The containerd Authors. +// https://github.com/containerd/containerd/blob/4902059cb554f4f06a8d06a12134c17117809f4e/images/converter/default.go#L401 +func writeJSON(ctx context.Context, cs content.Store, x interface{}, oldDesc ocispec.Descriptor, labels map[string]string) (*ocispec.Descriptor, error) { + b, err := json.Marshal(x) + if err != nil { + return nil, err + } + dgst := digest.SHA256.FromBytes(b) + ref := fmt.Sprintf("converter-write-json-%s", dgst.String()) + w, err := content.OpenWriter(ctx, cs, content.WithRef(ref)) + if err != nil { + return nil, err + } + if err := content.Copy(ctx, w, bytes.NewReader(b), int64(len(b)), dgst, content.WithLabels(labels)); err != nil { + return nil, err + } + if err := w.Close(); err != nil { + return nil, err + } + newDesc := oldDesc + newDesc.Size = int64(len(b)) + newDesc.Digest = dgst + return &newDesc, nil +} diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/errdefs/errors.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/errdefs/errors.go new file mode 100644 index 000000000000..3bdf74cb9ddf --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/errdefs/errors.go @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2020. Ant Group. All rights reserved. 
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package errdefs
+
+import (
+	stderrors "errors"
+	"net"
+	"strings"
+	"syscall"
+
+	"github.com/pkg/errors"
+)
+
+const signalKilled = "signal: killed"
+
+var (
+	ErrAlreadyExists = errors.New("already exists")
+	ErrNotFound      = errors.New("not found")
+)
+
+// IsAlreadyExists returns true if the error indicates that the object already exists.
+func IsAlreadyExists(err error) bool {
+	return errors.Is(err, ErrAlreadyExists)
+}
+
+// IsNotFound returns true if the error is due to a missing object.
+func IsNotFound(err error) bool {
+	return errors.Is(err, ErrNotFound)
+}
+
+// IsSignalKilled returns true if the error indicates that the process was
+// terminated by a signal ("signal: killed"), e.g. after a command timeout.
+func IsSignalKilled(err error) bool {
+	return strings.Contains(err.Error(), signalKilled)
+}
+
+// IsConnectionClosed returns true if the error is due to a closed network
+// connection; this happens when the snapshotter is shut down by SIGTERM.
+func IsConnectionClosed(err error) bool {
+	switch err := err.(type) {
+	case *net.OpError:
+		return err.Err.Error() == "use of closed network connection"
+	default:
+		return false
+	}
+}
+
+func IsErofsMounted(err error) bool {
+	return stderrors.Is(err, syscall.EBUSY)
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 5e494eaa175d..c7099279a9fc 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -311,6 +311,11 @@ github.com/containerd/go-cni
 # github.com/containerd/go-runc v1.0.0
 ## explicit; go 1.13
 github.com/containerd/go-runc
+# github.com/containerd/nydus-snapshotter v0.3.1
+## explicit; go 1.17
+github.com/containerd/nydus-snapshotter/pkg/converter
+github.com/containerd/nydus-snapshotter/pkg/converter/tool
+github.com/containerd/nydus-snapshotter/pkg/errdefs
 # github.com/containerd/stargz-snapshotter v0.12.0
 ## explicit; go 1.16
 github.com/containerd/stargz-snapshotter/cache
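For context, a minimal sketch of how the vendored converter package above is typically driven end to end. This assumes a running containerd daemon, the `nydus-image` binary on `$PATH`, and containerd's `images/converter` hook API (`IndexConvertFuncWithHook`, `ConvertHooks`, `Convert`); the image references, namespace, and option values are illustrative, not part of this patch.

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/images/converter"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/platforms"
	nydusify "github.com/containerd/nydus-snapshotter/pkg/converter"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// containerd operations require a namespace on the context.
	ctx := namespaces.WithNamespace(context.Background(), "default")

	packOpt := nydusify.PackOption{FsVersion: "6"}   // RAFS v6 (EROFS-compatible)
	mergeOpt := nydusify.MergeOption{FsVersion: "6"} // keep versions consistent

	// Convert each OCI layer to a nydus blob, then merge the per-layer
	// bootstraps and rewrite the manifest/index in a post-convert hook.
	convertFunc := converter.IndexConvertFuncWithHook(
		nydusify.LayerConvertFunc(packOpt),
		true, // convert Docker media types to OCI
		platforms.All,
		converter.ConvertHooks{PostConvertHook: nydusify.ConvertHookFunc(mergeOpt)},
	)

	// Writes a nydus variant of the source image under the new reference.
	if _, err := converter.Convert(ctx, client, "docker.io/example/app:nydus",
		"docker.io/example/app:latest", converter.WithIndexConvertFunc(convertFunc)); err != nil {
		log.Fatal(err)
	}
}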