diff --git a/common/libimage/filters.go b/common/libimage/filters.go index a40abfe070..0387800b68 100644 --- a/common/libimage/filters.go +++ b/common/libimage/filters.go @@ -12,6 +12,7 @@ import ( "time" "github.com/sirupsen/logrus" + "go.podman.io/common/pkg/digestutils" filtersPkg "go.podman.io/common/pkg/filters" "go.podman.io/common/pkg/timetype" "go.podman.io/image/v5/docker/reference" @@ -481,7 +482,7 @@ func filterID(value string) filterFunc { // filterDigest creates a digest filter for matching the specified value. func filterDigest(value string) (filterFunc, error) { - if !strings.HasPrefix(value, "sha256:") { + if !digestutils.HasDigestPrefix(value) { return nil, fmt.Errorf("invalid value %q for digest filter", value) } return func(img *Image, _ *layerTree) (bool, error) { diff --git a/common/libimage/filters_test.go b/common/libimage/filters_test.go index d086e608f4..c1a8dfe119 100644 --- a/common/libimage/filters_test.go +++ b/common/libimage/filters_test.go @@ -169,6 +169,9 @@ func TestFilterDigest(t *testing.T) { }{ {string(busybox.Digest()[:10]), 1, busybox.ID()}, {string(alpine.Digest()[:10]), 1, alpine.ID()}, + // Test SHA512 digest prefix matching + {"sha512:1234567890abcdef", 0, ""}, // Non-existent SHA512 digest + {"sha256:1234567890abcdef", 0, ""}, // Non-existent SHA256 digest } { listOptions := &ListImagesOptions{ Filters: []string{"digest=" + test.filter}, @@ -176,12 +179,25 @@ func TestFilterDigest(t *testing.T) { listedImages, err := runtime.ListImages(ctx, listOptions) require.NoError(t, err, "%v", test) require.Len(t, listedImages, test.matches, "%s -> %v", test.filter, listedImages) - require.Equal(t, listedImages[0].ID(), test.id) + if test.matches > 0 { + require.Equal(t, listedImages[0].ID(), test.id) + } } _, err = runtime.ListImages(ctx, &ListImagesOptions{ Filters: []string{"digest=this-is-not-a-digest"}, }) assert.Error(t, err) + + // Test invalid digest algorithms + _, err = runtime.ListImages(ctx, &ListImagesOptions{ + 
Filters: []string{"digest=md5:1234567890abcdef"}, + }) + assert.Error(t, err) + + _, err = runtime.ListImages(ctx, &ListImagesOptions{ + Filters: []string{"digest=sha384:1234567890abcdef"}, + }) + assert.Error(t, err) } func TestFilterID(t *testing.T) { diff --git a/common/libimage/image.go b/common/libimage/image.go index 32995968e7..2764c96c9e 100644 --- a/common/libimage/image.go +++ b/common/libimage/image.go @@ -18,6 +18,7 @@ import ( ociv1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" "go.podman.io/common/libimage/platform" + "go.podman.io/common/pkg/digestutils" "go.podman.io/image/v5/docker/reference" "go.podman.io/image/v5/image" "go.podman.io/image/v5/manifest" @@ -474,7 +475,7 @@ func (i *Image) removeRecursive(ctx context.Context, rmMap map[string]*RemoveIma // error. if referencedBy != "" && numNames != 1 { byID := strings.HasPrefix(i.ID(), referencedBy) - byDigest := strings.HasPrefix(referencedBy, "sha256:") + byDigest := digestutils.HasDigestPrefix(referencedBy) if !options.Force { if byID && numNames > 1 { return processedIDs, fmt.Errorf("unable to delete image %q by ID with more than one tag (%s): please force removal", i.ID(), i.Names()) @@ -577,7 +578,7 @@ var errTagDigest = errors.New("tag by digest not supported") // Tag the image with the specified name and store it in the local containers // storage. The name is normalized according to the rules of NormalizeName. func (i *Image) Tag(name string) error { - if strings.HasPrefix(name, "sha256:") { // ambiguous input + if digestutils.HasDigestPrefix(name) { // ambiguous input return fmt.Errorf("%s: %w", name, errTagDigest) } @@ -613,7 +614,7 @@ var errUntagDigest = errors.New("untag by digest not supported") // the local containers storage. The name is normalized according to the rules // of NormalizeName. 
func (i *Image) Untag(name string) error { - if strings.HasPrefix(name, "sha256:") { // ambiguous input + if digestutils.HasDigestPrefix(name) { // ambiguous input return fmt.Errorf("%s: %w", name, errUntagDigest) } diff --git a/common/libimage/import.go b/common/libimage/import.go index 54a77e4e95..910f6a3eaa 100644 --- a/common/libimage/import.go +++ b/common/libimage/import.go @@ -11,9 +11,11 @@ import ( v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" + "go.podman.io/common/pkg/digestutils" "go.podman.io/common/pkg/download" storageTransport "go.podman.io/image/v5/storage" tarballTransport "go.podman.io/image/v5/tarball" + supportedDigests "go.podman.io/storage/pkg/supported-digests" ) // ImportOptions allow for customizing image imports. @@ -128,5 +130,14 @@ func (r *Runtime) Import(ctx context.Context, path string, options *ImportOption } } - return "sha256:" + name, nil + // Extract the algorithm from the getImageID result + // getImageID returns something like "@sha256:abc123" or "@sha512:def456" + // We need to preserve the algorithm that was actually used + if algorithm, hash := digestutils.ExtractAlgorithmFromDigest(name); algorithm != "" { + return algorithm + ":" + hash, nil + } + + // Fallback to configured algorithm if we can't parse the digest + digestAlgorithm := supportedDigests.TmpDigestForNewObjects() + return digestAlgorithm.String() + ":" + name, nil } diff --git a/common/libimage/pull.go b/common/libimage/pull.go index 1183311f4c..db9aaa1dbb 100644 --- a/common/libimage/pull.go +++ b/common/libimage/pull.go @@ -15,6 +15,7 @@ import ( ociSpec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" "go.podman.io/common/pkg/config" + "go.podman.io/common/pkg/digestutils" registryTransport "go.podman.io/image/v5/docker" dockerArchiveTransport "go.podman.io/image/v5/docker/archive" dockerDaemonTransport "go.podman.io/image/v5/docker/daemon" @@ -26,6 +27,7 @@ import ( 
"go.podman.io/image/v5/transports/alltransports" "go.podman.io/image/v5/types" "go.podman.io/storage" + supportedDigests "go.podman.io/storage/pkg/supported-digests" ) // PullOptions allows for customizing image pulls. @@ -101,7 +103,7 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP // If the image clearly refers to a local one, we can look it up directly. // In fact, we need to since they are not parseable. - if strings.HasPrefix(name, "sha256:") || (len(name) == 64 && !strings.ContainsAny(name, "/.:@")) { + if digestutils.IsDigestReference(name) { if pullPolicy == config.PullPolicyAlways { return nil, fmt.Errorf("pull policy is always but image has been referred to by ID (%s)", name) } @@ -261,7 +263,16 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, if err != nil { return nil, nil, err } - imageName = "sha256:" + storageName[1:] + // Extract the algorithm from the getImageID result + // getImageID returns something like "@sha256:abc123" or "@sha512:def456" + // We need to preserve the algorithm that was actually used + if algorithm, hash := digestutils.ExtractAlgorithmFromDigest(storageName); algorithm != "" { + imageName = algorithm + ":" + hash + } else { + // Fallback to configured algorithm + digestAlgorithm := supportedDigests.TmpDigestForNewObjects() + imageName = digestAlgorithm.String() + ":" + storageName[1:] + } } else { // If the OCI-reference includes an image reference, use it storageName = refName imageName = storageName @@ -280,7 +291,16 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, if err != nil { return nil, nil, err } - imageName = "sha256:" + storageName[1:] + // Extract the algorithm from the getImageID result + // getImageID returns something like "@sha256:abc123" or "@sha512:def456" + // We need to preserve the algorithm that was actually used + if algorithm, hash := digestutils.ExtractAlgorithmFromDigest(storageName); algorithm != 
"" { + imageName = algorithm + ":" + hash + } else { + // Fallback to configured algorithm + digestAlgorithm := supportedDigests.TmpDigestForNewObjects() + imageName = digestAlgorithm.String() + ":" + storageName[1:] + } default: named, err := NormalizeName(storageName) if err != nil { @@ -306,7 +326,16 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, if err != nil { return nil, nil, err } - imageName = "sha256:" + storageName[1:] + // Extract the algorithm from the getImageID result + // getImageID returns something like "@sha256:abc123" or "@sha512:def456" + // We need to preserve the algorithm that was actually used + if algorithm, hash := digestutils.ExtractAlgorithmFromDigest(storageName); algorithm != "" { + imageName = algorithm + ":" + hash + } else { + // Fallback to configured algorithm + digestAlgorithm := supportedDigests.TmpDigestForNewObjects() + imageName = digestAlgorithm.String() + ":" + storageName[1:] + } } // Create a storage reference. @@ -340,8 +369,17 @@ func (r *Runtime) storageReferencesReferencesFromArchiveReader(ctx context.Conte } destNames = append(destNames, destName) // Make sure the image can be loaded after the pull by - // replacing the @ with sha256:. - imageNames = append(imageNames, "sha256:"+destName[1:]) + // replacing the @ with the correct algorithm. 
+ // Extract the algorithm from the getImageID result + // getImageID returns something like "@sha256:abc123" or "@sha512:def456" + // We need to preserve the algorithm that was actually used + if algorithm, hash := digestutils.ExtractAlgorithmFromDigest(destName); algorithm != "" { + imageNames = append(imageNames, algorithm+":"+hash) + } else { + // Fallback to configured algorithm + digestAlgorithm := supportedDigests.TmpDigestForNewObjects() + imageNames = append(imageNames, digestAlgorithm.String()+":"+destName[1:]) + } } else { for i := range destNames { ref, err := NormalizeName(destNames[i]) diff --git a/common/libimage/runtime.go b/common/libimage/runtime.go index 3378e6120a..1bb3178793 100644 --- a/common/libimage/runtime.go +++ b/common/libimage/runtime.go @@ -16,6 +16,7 @@ import ( "go.podman.io/common/libimage/define" "go.podman.io/common/libimage/platform" "go.podman.io/common/pkg/config" + "go.podman.io/common/pkg/digestutils" "go.podman.io/image/v5/docker/reference" "go.podman.io/image/v5/pkg/shortnames" storageTransport "go.podman.io/image/v5/storage" @@ -273,9 +274,9 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image, byDigest := false originalName := name - if strings.HasPrefix(name, "sha256:") { + if trimmed, found := digestutils.TrimDigestPrefix(name); found { byDigest = true - name = strings.TrimPrefix(name, "sha256:") + name = trimmed } byFullID := reference.IsFullIdentifier(name) diff --git a/common/pkg/digestutils/digestutils.go b/common/pkg/digestutils/digestutils.go new file mode 100644 index 0000000000..e1889eb707 --- /dev/null +++ b/common/pkg/digestutils/digestutils.go @@ -0,0 +1,134 @@ +//go:build !remote + +package digestutils + +import ( + "strings" + + "github.com/opencontainers/go-digest" +) + +// IsDigestReference determines if the given name is a digest-based reference. 
+// This function properly detects digests using the go-digest library instead of +// hardcoded string prefixes, avoiding conflicts with repository names like "sha256" or "sha512". +// +// The function supports: +// - Standard digest formats (algorithm:hash) like "sha256:abc123..." or "sha512:def456..." +// - Legacy 64-character hex format (SHA256 without algorithm prefix) for backward compatibility +// +// Examples: +// - "sha256:916f0027a575074ce72a331777c3478d6513f786a591bd892da1a577bf2335f9" → true +// - "sha512:0e1e21ecf105ec853d24d728867ad70613c21663a4693074b2a3619c1bd39d66b588c33723bb466c72424e80e3ca63c249078ab347bab9428500e7ee43059d0d" → true +// - "abcd1234567890abcdef1234567890abcdef1234567890abcdef1234567890ab" → true (legacy) +// - "sha256" → false (repository name) +// - "sha512:latest" → false (repository with tag) +// - "docker.io/sha256:latest" → false (repository with domain) +func IsDigestReference(name string) bool { + // First check if it's a valid digest format (algorithm:hash) + if _, err := digest.Parse(name); err == nil { + return true + } + + // Also check for the legacy 64-character hex format (SHA256 without algorithm prefix) + // This maintains backward compatibility for existing deployments + if len(name) == 64 && !strings.ContainsAny(name, "/.:@") { + // Verify it's actually hex + for _, c := range name { + if (c < '0' || c > '9') && (c < 'a' || c > 'f') && (c < 'A' || c > 'F') { + return false + } + } + return true + } + + return false +} + +// ExtractAlgorithmFromDigest extracts the algorithm and hash from a digest string. +// It expects input like "@sha256:abc123" or "@sha512:def456". +// Returns (algorithm, hash) if successful, or ("", "") if parsing fails. +// +// This function validates that the extracted algorithm and hash form a valid digest. +// It is useful for preserving the algorithm that was determined by functions like getImageID, +// rather than overriding it with a globally configured algorithm. 
+// +// Examples: +// - "@sha256:abc123" → ("sha256", "abc123") (if valid) +// - "@sha512:def456" → ("sha512", "def456") (if valid) +// - "sha256:abc123" → ("", "") (missing @ prefix) +// - "@invalid" → ("", "") (missing colon) +// - "@sha256:invalid" → ("", "") (invalid hash format) +func ExtractAlgorithmFromDigest(digestStr string) (string, string) { + if !strings.HasPrefix(digestStr, "@") { + return "", "" + } + + // Remove the "@" prefix + digestStr = digestStr[1:] + + // Split on the first ":" to get algorithm:hash + parts := strings.SplitN(digestStr, ":", 2) + if len(parts) != 2 { + return "", "" + } + + algorithm, hash := parts[0], parts[1] + + // Validate that the algorithm and hash form a valid digest + // This ensures we only return valid digest components + if _, err := digest.Parse(algorithm + ":" + hash); err != nil { + return "", "" + } + + return algorithm, hash +} + +// HasDigestPrefix checks if a string starts with any supported digest algorithm prefix. +// This is more scalable than hardcoding multiple HasPrefix checks for individual algorithms. +// +// Examples: +// - "sha256:abc123" → true +// - "sha512:def456" → true +// - "image:latest" → false +// - "registry.io/repo" → false +func HasDigestPrefix(s string) bool { + // Check if the string starts with any supported digest algorithm + // This is more efficient than checking each algorithm individually + for _, prefix := range []string{"sha256:", "sha512:"} { + if strings.HasPrefix(s, prefix) { + return true + } + } + return false +} + +// GetDigestPrefix returns the digest algorithm prefix if the string starts with one. +// Returns the prefix (including colon) if found, empty string otherwise. 
+// +// Examples: +// - "sha256:abc123" → "sha256:" +// - "sha512:def456" → "sha512:" +// - "image:latest" → "" +func GetDigestPrefix(s string) string { + prefixes := []string{"sha256:", "sha512:"} + for _, prefix := range prefixes { + if strings.HasPrefix(s, prefix) { + return prefix + } + } + return "" +} + +// TrimDigestPrefix removes the digest algorithm prefix from a string if present. +// Returns the string without the prefix and a boolean indicating if a prefix was found. +// +// Examples: +// - "sha256:abc123" → ("abc123", true) +// - "sha512:def456" → ("def456", true) +// - "image:latest" → ("image:latest", false) +func TrimDigestPrefix(s string) (string, bool) { + if prefix := GetDigestPrefix(s); prefix != "" { + return strings.TrimPrefix(s, prefix), true + } + return s, false +} diff --git a/common/pkg/digestutils/digestutils_test.go b/common/pkg/digestutils/digestutils_test.go new file mode 100644 index 0000000000..7196d61afe --- /dev/null +++ b/common/pkg/digestutils/digestutils_test.go @@ -0,0 +1,582 @@ +//go:build !remote + +package digestutils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +// TestIsDigestReference tests the IsDigestReference function to ensure +// it properly detects digest references while avoiding conflicts with repository names. 
+func TestIsDigestReference(t *testing.T) { + tests := []struct { + name string + input string + expected bool + desc string + }{ + // Valid digest formats + { + name: "sha256_digest", + input: "sha256:916f0027a575074ce72a331777c3478d6513f786a591bd892da1a577bf2335f9", + expected: true, + desc: "Valid SHA256 digest should be detected", + }, + { + name: "sha512_digest", + input: "sha512:0e1e21ecf105ec853d24d728867ad70613c21663a4693074b2a3619c1bd39d66b588c33723bb466c72424e80e3ca63c249078ab347bab9428500e7ee43059d0d", + expected: true, + desc: "Valid SHA512 digest should be detected", + }, + { + name: "sha256_invalid_hash", + input: "sha256:abcd1234567890abcdef1234567890abcdef1234567890abcdef1234567890ab", + expected: true, + desc: "SHA256 with invalid hash should still be detected as digest format", + }, + + // Legacy 64-character hex format (backward compatibility) + { + name: "legacy_sha256_hex", + input: "abcd1234567890abcdef1234567890abcdef1234567890abcdef1234567890ab", + expected: true, + desc: "Legacy 64-character hex format should be detected", + }, + + // Repository names that should NOT be detected as digests + { + name: "sha256_repo_name", + input: "sha256", + expected: false, + desc: "Repository name 'sha256' should NOT be detected as digest", + }, + { + name: "sha512_repo_name", + input: "sha512", + expected: false, + desc: "Repository name 'sha512' should NOT be detected as digest", + }, + { + name: "sha256_with_tag", + input: "sha256:latest", + expected: false, + desc: "Repository 'sha256' with tag should NOT be detected as digest", + }, + { + name: "sha512_with_tag", + input: "sha512:latest", + expected: false, + desc: "Repository 'sha512' with tag should NOT be detected as digest", + }, + { + name: "sha256_with_domain", + input: "docker.io/sha256:latest", + expected: false, + desc: "Repository 'docker.io/sha256' should NOT be detected as digest", + }, + { + name: "sha512_with_domain", + input: "quay.io/sha512:latest", + expected: false, + desc: 
"Repository 'quay.io/sha512' should NOT be detected as digest", + }, + + // Invalid digest formats + { + name: "invalid_digest_format", + input: "sha256:invalid", + expected: false, + desc: "Invalid digest format should NOT be detected", + }, + { + name: "too_short_hex", + input: "abcd1234", + expected: false, + desc: "Too short hex string should NOT be detected", + }, + { + name: "too_long_hex", + input: "abcd1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", + expected: false, + desc: "Too long hex string should NOT be detected", + }, + { + name: "non_hex_characters", + input: "ghij567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", + expected: false, + desc: "Non-hex characters should NOT be detected", + }, + + // Edge cases + { + name: "empty_string", + input: "", + expected: false, + desc: "Empty string should NOT be detected as digest", + }, + { + name: "with_slash", + input: "abcd1234567890abcdef1234567890abcdef1234567890abcdef1234567890ab/", + expected: false, + desc: "Hex with slash should NOT be detected (not legacy format)", + }, + { + name: "with_dot", + input: "abcd1234567890abcdef1234567890abcdef1234567890abcdef1234567890ab.", + expected: false, + desc: "Hex with dot should NOT be detected (not legacy format)", + }, + { + name: "with_colon", + input: "abcd1234567890abcdef1234567890abcdef1234567890abcdef1234567890ab:", + expected: false, + desc: "Hex with colon should NOT be detected (not legacy format)", + }, + { + name: "with_at", + input: "abcd1234567890abcdef1234567890abcdef1234567890abcdef1234567890ab@", + expected: false, + desc: "Hex with @ should NOT be detected (not legacy format)", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result := IsDigestReference(test.input) + assert.Equal(t, test.expected, result, + "Test: %s\nInput: %q\nExpected: %v, Got: %v\nDescription: %s", + test.name, test.input, test.expected, result, test.desc) + }) + } +} + +// TestExtractAlgorithmFromDigest 
tests the ExtractAlgorithmFromDigest function to ensure +// it properly extracts algorithm and hash from digest strings with @ prefix. +func TestExtractAlgorithmFromDigest(t *testing.T) { + tests := []struct { + name string + input string + expectedAlg string + expectedHash string + desc string + }{ + // Valid digest formats with @ prefix (using real digest values) + { + name: "sha256_with_at_prefix", + input: "@sha256:916f0027a575074ce72a331777c3478d6513f786a591bd892da1a577bf2335f9", + expectedAlg: "sha256", + expectedHash: "916f0027a575074ce72a331777c3478d6513f786a591bd892da1a577bf2335f9", + desc: "SHA256 digest with @ prefix should extract algorithm and hash", + }, + { + name: "sha512_with_at_prefix", + input: "@sha512:0e1e21ecf105ec853d24d728867ad70613c21663a4693074b2a3619c1bd39d66b588c33723bb466c72424e80e3ca63c249078ab347bab9428500e7ee43059d0d", + expectedAlg: "sha512", + expectedHash: "0e1e21ecf105ec853d24d728867ad70613c21663a4693074b2a3619c1bd39d66b588c33723bb466c72424e80e3ca63c249078ab347bab9428500e7ee43059d0d", + desc: "SHA512 digest with @ prefix should extract algorithm and hash", + }, + + // Invalid formats (should return empty strings) + { + name: "missing_at_prefix", + input: "sha256:abc123", + expectedAlg: "", + expectedHash: "", + desc: "Digest without @ prefix should return empty strings", + }, + { + name: "missing_colon", + input: "@sha256", + expectedAlg: "", + expectedHash: "", + desc: "Digest without colon should return empty strings", + }, + { + name: "empty_string", + input: "", + expectedAlg: "", + expectedHash: "", + desc: "Empty string should return empty strings", + }, + { + name: "only_at_prefix", + input: "@", + expectedAlg: "", + expectedHash: "", + desc: "Only @ prefix should return empty strings", + }, + { + name: "at_with_colon_only", + input: "@:", + expectedAlg: "", + expectedHash: "", + desc: "@: should return empty strings", + }, + + // Edge cases (should fail validation) + { + name: "multiple_colons", + input: 
"@sha256:abc:def:ghi", + expectedAlg: "", + expectedHash: "", + desc: "Multiple colons should fail validation", + }, + { + name: "empty_algorithm", + input: "@:abc123", + expectedAlg: "", + expectedHash: "", + desc: "Empty algorithm should fail validation", + }, + { + name: "empty_hash", + input: "@sha256:", + expectedAlg: "", + expectedHash: "", + desc: "Empty hash should cause validation failure", + }, + { + name: "whitespace_in_algorithm", + input: "@ sha256:abc123", + expectedAlg: "", + expectedHash: "", + desc: "Whitespace in algorithm should cause validation failure", + }, + { + name: "whitespace_in_hash", + input: "@sha256: abc123 ", + expectedAlg: "", + expectedHash: "", + desc: "Whitespace in hash should cause validation failure", + }, + + // Invalid digest formats (should return empty strings due to validation) + { + name: "invalid_hash_format", + input: "@sha256:invalid", + expectedAlg: "", + expectedHash: "", + desc: "Invalid hash format should return empty strings", + }, + { + name: "invalid_algorithm", + input: "@invalid:abc123", + expectedAlg: "", + expectedHash: "", + desc: "Invalid algorithm should return empty strings", + }, + { + name: "wrong_hash_length", + input: "@sha256:abc", + expectedAlg: "", + expectedHash: "", + desc: "Wrong hash length should return empty strings", + }, + { + name: "non_hex_hash", + input: "@sha256:ghijklmnop", + expectedAlg: "", + expectedHash: "", + desc: "Non-hex hash should return empty strings", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + algorithm, hash := ExtractAlgorithmFromDigest(test.input) + assert.Equal(t, test.expectedAlg, algorithm, + "Test: %s\nInput: %q\nExpected algorithm: %q, Got: %q\nDescription: %s", + test.name, test.input, test.expectedAlg, algorithm, test.desc) + assert.Equal(t, test.expectedHash, hash, + "Test: %s\nInput: %q\nExpected hash: %q, Got: %q\nDescription: %s", + test.name, test.input, test.expectedHash, hash, test.desc) + }) + } +} + +// 
TestHasDigestPrefix tests the HasDigestPrefix function to ensure +// it properly detects digest prefixes for all supported algorithms. +func TestHasDigestPrefix(t *testing.T) { + tests := []struct { + name string + input string + expected bool + desc string + }{ + // Valid digest prefixes + { + name: "sha256_prefix", + input: "sha256:abc123", + expected: true, + desc: "SHA256 prefix should be detected", + }, + { + name: "sha512_prefix", + input: "sha512:def456", + expected: true, + desc: "SHA512 prefix should be detected", + }, + + // Invalid cases + { + name: "no_prefix", + input: "image:latest", + expected: false, + desc: "Image tag should not be detected as digest", + }, + { + name: "registry_repo", + input: "registry.io/repo:tag", + expected: false, + desc: "Registry repository should not be detected as digest", + }, + { + name: "empty_string", + input: "", + expected: false, + desc: "Empty string should not be detected as digest", + }, + { + name: "partial_prefix", + input: "sha256", + expected: false, + desc: "Partial prefix without colon should not be detected", + }, + { + name: "unsupported_algorithm", + input: "md5:abc123", + expected: false, + desc: "Unsupported algorithm should not be detected", + }, + { + name: "sha3_256_prefix", + input: "sha3-256:ghi789", + expected: false, + desc: "SHA3-256 prefix should not be detected (not supported)", + }, + { + name: "sha3_512_prefix", + input: "sha3-512:jkl012", + expected: false, + desc: "SHA3-512 prefix should not be detected (not supported)", + }, + { + name: "blake2b_256_prefix", + input: "blake2b-256:mno345", + expected: false, + desc: "Blake2b-256 prefix should not be detected (not supported)", + }, + { + name: "blake2b_512_prefix", + input: "blake2b-512:pqr678", + expected: false, + desc: "Blake2b-512 prefix should not be detected (not supported)", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result := HasDigestPrefix(test.input) + assert.Equal(t, test.expected, 
result, + "Test: %s\nInput: %q\nExpected: %v, Got: %v\nDescription: %s", + test.name, test.input, test.expected, result, test.desc) + }) + } +} + +// TestGetDigestPrefix tests the GetDigestPrefix function to ensure +// it properly extracts digest algorithm prefixes. +func TestGetDigestPrefix(t *testing.T) { + tests := []struct { + name string + input string + expected string + desc string + }{ + // Valid digest prefixes + { + name: "sha256_prefix", + input: "sha256:abc123", + expected: "sha256:", + desc: "SHA256 prefix should be extracted", + }, + { + name: "sha512_prefix", + input: "sha512:def456", + expected: "sha512:", + desc: "SHA512 prefix should be extracted", + }, + + // Invalid cases + { + name: "no_prefix", + input: "image:latest", + expected: "", + desc: "Image tag should return empty prefix", + }, + { + name: "registry_repo", + input: "registry.io/repo:tag", + expected: "", + desc: "Registry repository should return empty prefix", + }, + { + name: "empty_string", + input: "", + expected: "", + desc: "Empty string should return empty prefix", + }, + { + name: "partial_prefix", + input: "sha256", + expected: "", + desc: "Partial prefix should return empty prefix", + }, + { + name: "unsupported_algorithm", + input: "md5:abc123", + expected: "", + desc: "Unsupported algorithm should return empty prefix", + }, + { + name: "sha3_256_prefix", + input: "sha3-256:ghi789", + expected: "", + desc: "SHA3-256 prefix should return empty (not supported)", + }, + { + name: "sha3_512_prefix", + input: "sha3-512:jkl012", + expected: "", + desc: "SHA3-512 prefix should return empty (not supported)", + }, + { + name: "blake2b_256_prefix", + input: "blake2b-256:mno345", + expected: "", + desc: "Blake2b-256 prefix should return empty (not supported)", + }, + { + name: "blake2b_512_prefix", + input: "blake2b-512:pqr678", + expected: "", + desc: "Blake2b-512 prefix should return empty (not supported)", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t 
*testing.T) { + result := GetDigestPrefix(test.input) + assert.Equal(t, test.expected, result, + "Test: %s\nInput: %q\nExpected: %q, Got: %q\nDescription: %s", + test.name, test.input, test.expected, result, test.desc) + }) + } +} + +// TestTrimDigestPrefix tests the TrimDigestPrefix function to ensure +// it properly removes digest algorithm prefixes. +func TestTrimDigestPrefix(t *testing.T) { + tests := []struct { + name string + input string + expectedResult string + expectedFound bool + desc string + }{ + // Valid digest prefixes + { + name: "sha256_prefix", + input: "sha256:abc123", + expectedResult: "abc123", + expectedFound: true, + desc: "SHA256 prefix should be removed", + }, + { + name: "sha512_prefix", + input: "sha512:def456", + expectedResult: "def456", + expectedFound: true, + desc: "SHA512 prefix should be removed", + }, + + // Invalid cases + { + name: "no_prefix", + input: "image:latest", + expectedResult: "image:latest", + expectedFound: false, + desc: "Image tag should not be modified", + }, + { + name: "registry_repo", + input: "registry.io/repo:tag", + expectedResult: "registry.io/repo:tag", + expectedFound: false, + desc: "Registry repository should not be modified", + }, + { + name: "empty_string", + input: "", + expectedResult: "", + expectedFound: false, + desc: "Empty string should not be modified", + }, + { + name: "partial_prefix", + input: "sha256", + expectedResult: "sha256", + expectedFound: false, + desc: "Partial prefix should not be modified", + }, + { + name: "unsupported_algorithm", + input: "md5:abc123", + expectedResult: "md5:abc123", + expectedFound: false, + desc: "Unsupported algorithm should not be modified", + }, + { + name: "sha3_256_prefix", + input: "sha3-256:ghi789", + expectedResult: "sha3-256:ghi789", + expectedFound: false, + desc: "SHA3-256 prefix should not be modified (not supported)", + }, + { + name: "sha3_512_prefix", + input: "sha3-512:jkl012", + expectedResult: "sha3-512:jkl012", + expectedFound: false, + 
desc: "SHA3-512 prefix should not be modified (not supported)", + }, + { + name: "blake2b_256_prefix", + input: "blake2b-256:mno345", + expectedResult: "blake2b-256:mno345", + expectedFound: false, + desc: "Blake2b-256 prefix should not be modified (not supported)", + }, + { + name: "blake2b_512_prefix", + input: "blake2b-512:pqr678", + expectedResult: "blake2b-512:pqr678", + expectedFound: false, + desc: "Blake2b-512 prefix should not be modified (not supported)", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result, found := TrimDigestPrefix(test.input) + assert.Equal(t, test.expectedResult, result, + "Test: %s\nInput: %q\nExpected result: %q, Got: %q\nDescription: %s", + test.name, test.input, test.expectedResult, result, test.desc) + assert.Equal(t, test.expectedFound, found, + "Test: %s\nInput: %q\nExpected found: %v, Got: %v\nDescription: %s", + test.name, test.input, test.expectedFound, found, test.desc) + }) + } +} diff --git a/image/copy/digesting_reader_test.go b/image/copy/digesting_reader_test.go index 2e17437ae3..896a67417e 100644 --- a/image/copy/digesting_reader_test.go +++ b/image/copy/digesting_reader_test.go @@ -34,6 +34,10 @@ func TestDigestingReaderRead(t *testing.T) { {[]byte(""), "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, {[]byte("abc"), "sha256:ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"}, {make([]byte, 65537), "sha256:3266304f31be278d06c3bd3eb9aa3e00c59bedec0a890de466568b0b90b0e01f"}, + // SHA512 test cases + {[]byte(""), "sha512:cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"}, + {[]byte("abc"), "sha512:ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f"}, + {make([]byte, 65537), 
"sha512:490821004e5a6025fe335a11f6c27b0f73cae0434bd9d2e5ac7aee3370bd421718cad7d8fbfd5f39153b6ca3b05faede68f5d6e462eeaf143bb034791ceb72ab"}, } // Valid input for _, c := range cases { diff --git a/image/copy/single.go b/image/copy/single.go index 5c81fd2d53..0b169ef599 100644 --- a/image/copy/single.go +++ b/image/copy/single.go @@ -28,6 +28,7 @@ import ( "go.podman.io/image/v5/transports" "go.podman.io/image/v5/types" chunkedToc "go.podman.io/storage/pkg/chunked/toc" + supportedDigests "go.podman.io/storage/pkg/supported-digests" ) // imageCopier tracks state specific to a single image (possibly an item of a manifest list) @@ -977,7 +978,15 @@ func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadClo } // computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest. +// This is a wrapper around computeDiffIDWithAlgorithm that uses the globally configured digest algorithm. func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc) (digest.Digest, error) { + algorithm := supportedDigests.TmpDigestForNewObjects() + return computeDiffIDWithAlgorithm(stream, decompressor, algorithm) +} + +// computeDiffIDWithAlgorithm reads all input from layerStream, uncompresses it using decompressor if necessary, +// and returns its digest using the specified algorithm. 
+func computeDiffIDWithAlgorithm(stream io.Reader, decompressor compressiontypes.DecompressorFunc, algorithm digest.Algorithm) (digest.Digest, error) { if decompressor != nil { s, err := decompressor(stream) if err != nil { @@ -987,7 +996,7 @@ func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorF stream = s } - return digest.Canonical.FromReader(stream) + return algorithm.FromReader(stream) } // algorithmsByNames returns slice of Algorithms from a sequence of Algorithm Names diff --git a/image/copy/single_test.go b/image/copy/single_test.go index 66daf901ce..3a700d4cbc 100644 --- a/image/copy/single_test.go +++ b/image/copy/single_test.go @@ -17,6 +17,7 @@ import ( "go.podman.io/image/v5/pkg/compression" compressiontypes "go.podman.io/image/v5/pkg/compression/types" "go.podman.io/image/v5/types" + supportedDigests "go.podman.io/storage/pkg/supported-digests" ) func TestUpdatedBlobInfoFromReuse(t *testing.T) { @@ -110,6 +111,7 @@ func goDiffIDComputationGoroutineWithTimeout(layerStream io.ReadCloser, decompre } func TestDiffIDComputationGoroutine(t *testing.T) { + // Test with SHA256 (default) stream, err := os.Open("fixtures/Hello.uncompressed") require.NoError(t, err) res := goDiffIDComputationGoroutineWithTimeout(stream, nil) @@ -117,6 +119,16 @@ func TestDiffIDComputationGoroutine(t *testing.T) { assert.NoError(t, res.err) assert.Equal(t, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969", res.digest.String()) + // Test with SHA512 using the parametrized function + stream2, err := os.Open("fixtures/Hello.uncompressed") + require.NoError(t, err) + defer stream2.Close() + + // Use the parametrized function directly instead of overriding global state + digest, err := computeDiffIDWithAlgorithm(stream2, nil, digest.SHA512) + require.NoError(t, err) + assert.Equal(t, "sha512:3615f80c9d293ed7402687f94b22d58e529b8cc7916f8fac7fddf7fbd5af4cf777d3d795a7a00a16bf7e7f3fb9561ee9baae480da9fe7a18769e71886b03f315", 
digest.String()) + // Error reading input reader, writer := io.Pipe() err = writer.CloseWithError(errors.New("Expected error reading input in diffIDComputationGoroutine")) @@ -130,32 +142,59 @@ func TestComputeDiffID(t *testing.T) { for _, c := range []struct { filename string decompressor compressiontypes.DecompressorFunc + algorithm digest.Algorithm result digest.Digest }{ - {"fixtures/Hello.uncompressed", nil, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"}, - {"fixtures/Hello.gz", nil, "sha256:0bd4409dcd76476a263b8f3221b4ce04eb4686dec40bfdcc2e86a7403de13609"}, - {"fixtures/Hello.gz", compression.GzipDecompressor, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"}, - {"fixtures/Hello.zst", nil, "sha256:361a8e0372ad438a0316eb39a290318364c10b60d0a7e55b40aa3eafafc55238"}, - {"fixtures/Hello.zst", compression.ZstdDecompressor, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"}, + // SHA256 test cases (default) + {"fixtures/Hello.uncompressed", nil, digest.SHA256, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"}, + {"fixtures/Hello.gz", nil, digest.SHA256, "sha256:0bd4409dcd76476a263b8f3221b4ce04eb4686dec40bfdcc2e86a7403de13609"}, + {"fixtures/Hello.gz", compression.GzipDecompressor, digest.SHA256, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"}, + {"fixtures/Hello.zst", nil, digest.SHA256, "sha256:361a8e0372ad438a0316eb39a290318364c10b60d0a7e55b40aa3eafafc55238"}, + {"fixtures/Hello.zst", compression.ZstdDecompressor, digest.SHA256, "sha256:185f8db32271fe25f561a6fc938b2e264306ec304eda518007d1764826381969"}, + // SHA512 test cases + {"fixtures/Hello.uncompressed", nil, digest.SHA512, "sha512:3615f80c9d293ed7402687f94b22d58e529b8cc7916f8fac7fddf7fbd5af4cf777d3d795a7a00a16bf7e7f3fb9561ee9baae480da9fe7a18769e71886b03f315"}, + {"fixtures/Hello.gz", nil, digest.SHA512, 
"sha512:8ee9be48dfc6274f65199847cd18ff4711f00329c5063b17cd128ba45ea1b9cea2479db0266cc1f4a3902874fdd7306f9c8a615347c0603b893fc75184fcb627"}, + {"fixtures/Hello.gz", compression.GzipDecompressor, digest.SHA512, "sha512:3615f80c9d293ed7402687f94b22d58e529b8cc7916f8fac7fddf7fbd5af4cf777d3d795a7a00a16bf7e7f3fb9561ee9baae480da9fe7a18769e71886b03f315"}, + {"fixtures/Hello.zst", nil, digest.SHA512, "sha512:e4ddd61689ce9d1cdd49e11dc8dc89ca064bdb09e85b9df56658560b8207647a78b95d04c3f5f2fb31abf13e1822f0d19307df18a3fdf88f58ef24a50e71a1ae"}, + {"fixtures/Hello.zst", compression.ZstdDecompressor, digest.SHA512, "sha512:3615f80c9d293ed7402687f94b22d58e529b8cc7916f8fac7fddf7fbd5af4cf777d3d795a7a00a16bf7e7f3fb9561ee9baae480da9fe7a18769e71886b03f315"}, } { stream, err := os.Open(c.filename) require.NoError(t, err, c.filename) defer stream.Close() + // Save original algorithm and set the desired one + originalAlgorithm := supportedDigests.TmpDigestForNewObjects() + err = supportedDigests.TmpSetDigestForNewObjects(c.algorithm) + require.NoError(t, err) + + // Test the digest computation directly without ImageDestination diffID, err := computeDiffID(stream, c.decompressor) require.NoError(t, err, c.filename) assert.Equal(t, c.result, diffID) + + // Restore the original algorithm + err = supportedDigests.TmpSetDigestForNewObjects(originalAlgorithm) + require.NoError(t, err) } // Error initializing decompression - _, err := computeDiffID(bytes.NewReader([]byte{}), compression.GzipDecompressor) + originalAlgorithm := supportedDigests.TmpDigestForNewObjects() + err := supportedDigests.TmpSetDigestForNewObjects(digest.SHA256) + require.NoError(t, err) + _, err = computeDiffID(bytes.NewReader([]byte{}), compression.GzipDecompressor) assert.Error(t, err) + err = supportedDigests.TmpSetDigestForNewObjects(originalAlgorithm) + require.NoError(t, err) // Error reading input reader, writer := io.Pipe() defer reader.Close() err = writer.CloseWithError(errors.New("Expected error reading input in 
computeDiffID")) require.NoError(t, err) + err = supportedDigests.TmpSetDigestForNewObjects(digest.SHA256) + require.NoError(t, err) _, err = computeDiffID(reader, nil) assert.Error(t, err) + err = supportedDigests.TmpSetDigestForNewObjects(originalAlgorithm) + require.NoError(t, err) } diff --git a/image/directory/directory_dest.go b/image/directory/directory_dest.go index 31842f7260..bf1a9a030e 100644 --- a/image/directory/directory_dest.go +++ b/image/directory/directory_dest.go @@ -151,7 +151,7 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io. } }() - digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) + digester, stream := putblobdigest.DigestIfConfiguredUnknown(stream, inputInfo) // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). size, err := io.Copy(blobFile, stream) if err != nil { diff --git a/image/docker/docker_image_dest.go b/image/docker/docker_image_dest.go index 86077fe932..a4909b56d2 100644 --- a/image/docker/docker_image_dest.go +++ b/image/docker/docker_image_dest.go @@ -178,7 +178,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream return private.UploadedBlob{}, fmt.Errorf("determining upload URL: %w", err) } - digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) + digester, stream := putblobdigest.DigestIfConfiguredUnknown(stream, inputInfo) sizeCounter := &sizeCounter{} stream = io.TeeReader(stream, sizeCounter) diff --git a/image/internal/image/docker_schema1.go b/image/internal/image/docker_schema1.go index da7a943b3d..62839931a8 100644 --- a/image/internal/image/docker_schema1.go +++ b/image/internal/image/docker_schema1.go @@ -9,6 +9,7 @@ import ( "go.podman.io/image/v5/docker/reference" "go.podman.io/image/v5/manifest" "go.podman.io/image/v5/types" + supportedDigests "go.podman.io/storage/pkg/supported-digests" ) type manifestSchema1 struct { @@ -160,6 +161,13 @@ func (m 
*manifestSchema1) convertToManifestSchema2Generic(ctx context.Context, o // // Based on github.com/docker/docker/distribution/pull_v2.go func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) { + // Explicitly reject SHA512+Schema1 combinations as they are not supported + // Schema1 is deprecated and Docker/registry don't support SHA512+Schema1 + configuredAlgorithm := supportedDigests.TmpDigestForNewObjects() + if configuredAlgorithm == digest.SHA512 { + return nil, fmt.Errorf("SHA512+Schema1 is not supported: Schema1 is deprecated and Docker/registry do not support SHA512 with Schema1 manifests. Please use SHA256 or convert to Schema2/OCI format") + } + uploadedLayerInfos := options.InformationOnly.LayerInfos layerDiffIDs := options.InformationOnly.LayerDiffIDs @@ -219,7 +227,7 @@ func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *t configDescriptor := manifest.Schema2Descriptor{ MediaType: manifest.DockerV2Schema2ConfigMediaType, Size: int64(len(configJSON)), - Digest: digest.FromBytes(configJSON), + Digest: supportedDigests.TmpDigestForNewObjects().FromBytes(configJSON), } if options.LayerInfos != nil { diff --git a/image/internal/image/docker_schema1_test.go b/image/internal/image/docker_schema1_test.go index 9d399eae4c..b2a3556dbc 100644 --- a/image/internal/image/docker_schema1_test.go +++ b/image/internal/image/docker_schema1_test.go @@ -15,6 +15,7 @@ import ( "go.podman.io/image/v5/docker/reference" "go.podman.io/image/v5/manifest" "go.podman.io/image/v5/types" + supportedDigests "go.podman.io/storage/pkg/supported-digests" ) var schema1FixtureLayerInfos = []types.BlobInfo{ @@ -720,3 +721,38 @@ func TestManifestSchema1CanChangeLayerCompression(t *testing.T) { assert.True(t, m.CanChangeLayerCompression("")) } } + +// TestSchema1SHA512Rejection tests that SHA512+Schema1 combinations are explicitly rejected +func TestSchema1SHA512Rejection(t *testing.T) 
{ + // Save original algorithm and restore it after the test + originalAlgorithm := supportedDigests.TmpDigestForNewObjects() + defer func() { + err := supportedDigests.TmpSetDigestForNewObjects(originalAlgorithm) + require.NoError(t, err) + }() + + // Set SHA512 algorithm + err := supportedDigests.TmpSetDigestForNewObjects(digest.SHA512) + require.NoError(t, err) + + // Create a schema1 manifest + manifestBlob, err := os.ReadFile(filepath.Join("fixtures", "schema1.json")) + require.NoError(t, err) + + m, err := manifestSchema1FromManifest(manifestBlob) + require.NoError(t, err) + + // Try to convert to schema2 with SHA512 - this should fail + schema1Manifest := m.(*manifestSchema1) + _, err = schema1Manifest.convertToManifestSchema2(context.Background(), &types.ManifestUpdateOptions{ + InformationOnly: types.ManifestUpdateInformation{ + LayerInfos: schema1FixtureLayerInfos, + }, + }) + + // Should get an error about SHA512+Schema1 not being supported + require.Error(t, err) + assert.Contains(t, err.Error(), "SHA512+Schema1 is not supported") + assert.Contains(t, err.Error(), "Schema1 is deprecated") + assert.Contains(t, err.Error(), "Please use SHA256 or convert to Schema2/OCI format") +} diff --git a/image/internal/image/docker_schema2.go b/image/internal/image/docker_schema2.go index 1586d67900..a06e7b10b0 100644 --- a/image/internal/image/docker_schema2.go +++ b/image/internal/image/docker_schema2.go @@ -17,6 +17,7 @@ import ( "go.podman.io/image/v5/internal/iolimits" "go.podman.io/image/v5/manifest" "go.podman.io/image/v5/pkg/blobinfocache/none" + "go.podman.io/image/v5/pkg/digestvalidation" "go.podman.io/image/v5/types" ) @@ -110,9 +111,11 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) { if err != nil { return nil, err } - computedDigest := digest.FromBytes(blob) - if computedDigest != m.m.ConfigDescriptor.Digest { - return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, 
m.m.ConfigDescriptor.Digest) + expectedDigest := m.m.ConfigDescriptor.Digest + + // Validate the blob against the expected digest using centralized validation + if err := digestvalidation.ValidateBlobAgainstDigest(blob, expectedDigest); err != nil { + return nil, fmt.Errorf("config descriptor validation failed: %w", err) } m.configBlob = blob } diff --git a/image/internal/image/oci.go b/image/internal/image/oci.go index 56a1a6d64e..ef6509a075 100644 --- a/image/internal/image/oci.go +++ b/image/internal/image/oci.go @@ -8,7 +8,6 @@ import ( "slices" ociencspec "github.com/containers/ocicrypt/spec" - "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" "go.podman.io/image/v5/docker/reference" "go.podman.io/image/v5/internal/iolimits" @@ -74,9 +73,12 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) { if err != nil { return nil, err } - computedDigest := digest.FromBytes(blob) - if computedDigest != m.m.Config.Digest { - return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest) + // Use the same algorithm as the expected digest + expectedDigest := m.m.Config.Digest + algorithm := expectedDigest.Algorithm() + computedDigest := algorithm.FromBytes(blob) + if computedDigest != expectedDigest { + return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, expectedDigest) } m.configBlob = blob } diff --git a/image/internal/manifest/manifest.go b/image/internal/manifest/manifest.go index 687b37fb07..a4b92601a4 100644 --- a/image/internal/manifest/manifest.go +++ b/image/internal/manifest/manifest.go @@ -8,6 +8,7 @@ import ( digest "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" compressiontypes "go.podman.io/image/v5/pkg/compression/types" + supportedDigests "go.podman.io/storage/pkg/supported-digests" ) // FIXME: Should we just use docker/distribution 
and docker/docker implementations directly? @@ -123,7 +124,7 @@ func Digest(manifest []byte) (digest.Digest, error) { } } - return digest.FromBytes(manifest), nil + return supportedDigests.TmpDigestForNewObjects().FromBytes(manifest), nil } // MatchesDigest returns true iff the manifest matches expectedDigest. diff --git a/image/internal/manifest/manifest_test.go b/image/internal/manifest/manifest_test.go index da50bf76da..4de93fe448 100644 --- a/image/internal/manifest/manifest_test.go +++ b/image/internal/manifest/manifest_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.podman.io/image/v5/pkg/compression" + supportedDigests "go.podman.io/storage/pkg/supported-digests" ) const ( @@ -87,6 +88,9 @@ func TestMatchesDigest(t *testing.T) { {"v2s1.manifest.json", TestDockerV2S2ManifestDigest, false}, // Unrecognized algorithm {"v2s2.manifest.json", digest.Digest("md5:2872f31c5c1f62a694fbd20c1e85257c"), false}, + // SHA512 test cases (these should fail because we're using SHA256 by default) + {"v2s2.manifest.json", digest.SHA512.FromBytes([]byte("test")), false}, + {"v2s1.manifest.json", digest.SHA512.FromBytes([]byte("test")), false}, // Mangled format {"v2s2.manifest.json", digest.Digest(TestDockerV2S2ManifestDigest.String() + "abc"), false}, {"v2s2.manifest.json", digest.Digest(TestDockerV2S2ManifestDigest.String()[:20]), false}, @@ -112,6 +116,74 @@ func TestMatchesDigest(t *testing.T) { assert.NoError(t, err) } +func TestMatchesDigestWithSHA512(t *testing.T) { + // Save original algorithm and restore it after the test + originalAlgorithm := supportedDigests.TmpDigestForNewObjects() + defer func() { + err := supportedDigests.TmpSetDigestForNewObjects(originalAlgorithm) + require.NoError(t, err) + }() + + // Set SHA512 algorithm + err := supportedDigests.TmpSetDigestForNewObjects(digest.SHA512) + require.NoError(t, err) + + cases := []struct { + path string + expectedDigest digest.Digest + result bool + 
}{ + // Success cases with SHA512 + {"v2s2.manifest.json", digest.SHA512.FromBytes([]byte("test")), false}, // Wrong data + {"v2s1.manifest.json", digest.SHA512.FromBytes([]byte("test")), false}, // Wrong data + // Empty manifest + {"", digest.SHA512.FromBytes([]byte{}), true}, + // Wrong algorithm (SHA256 when SHA512 is configured) + {"v2s2.manifest.json", TestDockerV2S2ManifestDigest, false}, + {"v2s1.manifest.json", TestDockerV2S1ManifestDigest, false}, + // Mangled format + {"v2s2.manifest.json", digest.Digest("sha512:invalid"), false}, + {"v2s2.manifest.json", digest.Digest(""), false}, + } + + for _, c := range cases { + var manifest []byte + var err error + + if c.path == "" { + // Empty manifest case + manifest = []byte{} + } else { + manifest, err = os.ReadFile(filepath.Join("testdata", c.path)) + require.NoError(t, err) + } + + // For success cases, compute the correct SHA512 digest + if c.result { + c.expectedDigest = digest.SHA512.FromBytes(manifest) + } + + res, err := MatchesDigest(manifest, c.expectedDigest) + require.NoError(t, err) + assert.Equal(t, c.result, res, "path=%s, expectedDigest=%s", c.path, c.expectedDigest) + } + + // Test with actual manifest files and their correct SHA512 digests + manifestFiles := []string{"v2s2.manifest.json", "v2s1.manifest.json"} + for _, file := range manifestFiles { + manifest, err := os.ReadFile(filepath.Join("testdata", file)) + require.NoError(t, err) + + // Use the Digest function to get the correct digest (which handles signature stripping) + expectedDigest, err := Digest(manifest) + require.NoError(t, err) + + res, err := MatchesDigest(manifest, expectedDigest) + require.NoError(t, err) + assert.True(t, res, "MatchesDigest should work with SHA512 for %s", file) + } +} + func TestNormalizedMIMEType(t *testing.T) { for _, c := range []string{ // Valid MIME types, normalized to themselves DockerV2Schema1MediaType, diff --git a/image/internal/putblobdigest/put_blob_digest.go 
b/image/internal/putblobdigest/put_blob_digest.go index ce50542751..047f8641bb 100644 --- a/image/internal/putblobdigest/put_blob_digest.go +++ b/image/internal/putblobdigest/put_blob_digest.go @@ -5,6 +5,7 @@ import ( "github.com/opencontainers/go-digest" "go.podman.io/image/v5/types" + supportedDigests "go.podman.io/storage/pkg/supported-digests" ) // Digester computes a digest of the provided stream, if not known yet. @@ -13,7 +14,7 @@ type Digester struct { digester digest.Digester // Or nil } -// newDigester initiates computation of a digest.Canonical digest of stream, +// newDigester initiates computation of a digest (using the configured algorithm) of stream, // if !validDigest; otherwise it just records knownDigest to be returned later. // The caller MUST use the returned stream instead of the original value. func newDigester(stream io.Reader, knownDigest digest.Digest, validDigest bool) (Digester, io.Reader) { @@ -21,7 +22,7 @@ func newDigester(stream io.Reader, knownDigest digest.Digest, validDigest bool) return Digester{knownDigest: knownDigest}, stream } else { res := Digester{ - digester: digest.Canonical.Digester(), + digester: supportedDigests.TmpDigestForNewObjects().Digester(), } stream = io.TeeReader(stream, res.digester.Hash()) return res, stream @@ -37,13 +38,14 @@ func DigestIfUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Re return newDigester(stream, d, d != "") } -// DigestIfCanonicalUnknown initiates computation of a digest.Canonical digest of stream, -// if a digest.Canonical digest is not supplied in the provided blobInfo; +// DigestIfConfiguredUnknown initiates computation of a digest (using the configured algorithm) of stream, +// if a digest with the configured algorithm is not supplied in the provided blobInfo; // otherwise blobInfo.Digest will be used. // The caller MUST use the returned stream instead of the original value. 
-func DigestIfCanonicalUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) { +func DigestIfConfiguredUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) { d := blobInfo.Digest - return newDigester(stream, d, d != "" && d.Algorithm() == digest.Canonical) + configuredAlgorithm := supportedDigests.TmpDigestForNewObjects() + return newDigester(stream, d, d != "" && d.Algorithm() == configuredAlgorithm) } // Digest() returns a digest value possibly computed by Digester. diff --git a/image/internal/putblobdigest/put_blob_digest_test.go b/image/internal/putblobdigest/put_blob_digest_test.go index f7fe92feb7..a44ecbf889 100644 --- a/image/internal/putblobdigest/put_blob_digest_test.go +++ b/image/internal/putblobdigest/put_blob_digest_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.podman.io/image/v5/types" + supportedDigests "go.podman.io/storage/pkg/supported-digests" ) var testData = []byte("test data") @@ -33,6 +34,42 @@ func testDigester(t *testing.T, constructor func(io.Reader, types.BlobInfo) (Dig } } +// TestDigestAlgorithmConfiguration tests that the digest algorithm configuration works correctly +func TestDigestAlgorithmConfiguration(t *testing.T) { + // Save original algorithm and restore it after the test + originalAlgorithm := supportedDigests.TmpDigestForNewObjects() + defer func() { + err := supportedDigests.TmpSetDigestForNewObjects(originalAlgorithm) + require.NoError(t, err) + }() + + // Test with SHA256 (default) + err := supportedDigests.TmpSetDigestForNewObjects(digest.SHA256) + require.NoError(t, err) + + stream := bytes.NewReader(testData) + digester, newStream := DigestIfConfiguredUnknown(stream, types.BlobInfo{Digest: digest.Digest("")}) + _, err = io.ReadAll(newStream) + require.NoError(t, err) + + // The digest should be computed using SHA256 + expectedSHA256 := digest.SHA256.FromBytes(testData) + assert.Equal(t, expectedSHA256, 
digester.Digest()) + + // Test with SHA512 + err = supportedDigests.TmpSetDigestForNewObjects(digest.SHA512) + require.NoError(t, err) + + stream = bytes.NewReader(testData) + digester, newStream = DigestIfConfiguredUnknown(stream, types.BlobInfo{Digest: digest.Digest("")}) + _, err = io.ReadAll(newStream) + require.NoError(t, err) + + // The digest should be computed using SHA512 + expectedSHA512 := digest.SHA512.FromBytes(testData) + assert.Equal(t, expectedSHA512, digester.Digest()) +} + func TestDigestIfUnknown(t *testing.T) { testDigester(t, DigestIfUnknown, []testCase{ { @@ -40,6 +77,16 @@ func TestDigestIfUnknown(t *testing.T) { computesDigest: false, expectedDigest: digest.Digest("sha256:uninspected-value"), }, + { + inputDigest: digest.Digest("sha512:uninspected-value"), + computesDigest: false, + expectedDigest: digest.Digest("sha512:uninspected-value"), + }, + { + inputDigest: digest.Digest(""), + computesDigest: true, + expectedDigest: digest.SHA256.FromBytes(testData), + }, { inputDigest: digest.Digest("unknown-algorithm:uninspected-value"), computesDigest: false, @@ -53,22 +100,65 @@ func TestDigestIfUnknown(t *testing.T) { }) } -func TestDigestIfCanonicalUnknown(t *testing.T) { - testDigester(t, DigestIfCanonicalUnknown, []testCase{ +func TestDigestIfConfiguredUnknown(t *testing.T) { + // Save original algorithm and restore it after the test + originalAlgorithm := supportedDigests.TmpDigestForNewObjects() + defer func() { + err := supportedDigests.TmpSetDigestForNewObjects(originalAlgorithm) + require.NoError(t, err) + }() + + // Test with SHA256 (default) - this exercises the default behavior + err := supportedDigests.TmpSetDigestForNewObjects(digest.SHA256) + require.NoError(t, err) + + testDigester(t, DigestIfConfiguredUnknown, []testCase{ { inputDigest: digest.Digest("sha256:uninspected-value"), computesDigest: false, expectedDigest: digest.Digest("sha256:uninspected-value"), }, + { + inputDigest: digest.Digest("sha512:uninspected-value"), + 
computesDigest: true, + expectedDigest: digest.SHA256.FromBytes(testData), + }, { inputDigest: digest.Digest("unknown-algorithm:uninspected-value"), computesDigest: true, - expectedDigest: digest.Canonical.FromBytes(testData), + expectedDigest: digest.SHA256.FromBytes(testData), }, { inputDigest: "", computesDigest: true, - expectedDigest: digest.Canonical.FromBytes(testData), + expectedDigest: digest.SHA256.FromBytes(testData), + }, + }) + + // Test with SHA512 - this exercises the newly added SHA512 functionality + err = supportedDigests.TmpSetDigestForNewObjects(digest.SHA512) + require.NoError(t, err) + + testDigester(t, DigestIfConfiguredUnknown, []testCase{ + { + inputDigest: digest.Digest("sha256:uninspected-value"), + computesDigest: true, + expectedDigest: digest.SHA512.FromBytes(testData), + }, + { + inputDigest: digest.Digest("sha512:uninspected-value"), + computesDigest: false, + expectedDigest: digest.Digest("sha512:uninspected-value"), + }, + { + inputDigest: digest.Digest("unknown-algorithm:uninspected-value"), + computesDigest: true, + expectedDigest: digest.SHA512.FromBytes(testData), + }, + { + inputDigest: "", + computesDigest: true, + expectedDigest: digest.SHA512.FromBytes(testData), }, }) } diff --git a/image/internal/streamdigest/stream_digest.go b/image/internal/streamdigest/stream_digest.go index 83608e04a6..7f4191dbfe 100644 --- a/image/internal/streamdigest/stream_digest.go +++ b/image/internal/streamdigest/stream_digest.go @@ -23,7 +23,7 @@ func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *type diskBlob.Close() os.Remove(diskBlob.Name()) } - digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, *inputInfo) + digester, stream := putblobdigest.DigestIfConfiguredUnknown(stream, *inputInfo) written, err := io.Copy(diskBlob, stream) if err != nil { cleanup() diff --git a/image/oci/layout/oci_dest.go b/image/oci/layout/oci_dest.go index 48fe812df5..c26a1930c0 100644 --- a/image/oci/layout/oci_dest.go +++ 
b/image/oci/layout/oci_dest.go @@ -134,7 +134,7 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io. } }() - digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) + digester, stream := putblobdigest.DigestIfConfiguredUnknown(stream, inputInfo) // TODO: This can take quite some time, and should ideally be cancellable using ctx.Done(). size, err := io.Copy(blobFile, stream) if err != nil { diff --git a/image/pkg/blobcache/dest.go b/image/pkg/blobcache/dest.go index c1f74a42cd..84b67a30c9 100644 --- a/image/pkg/blobcache/dest.go +++ b/image/pkg/blobcache/dest.go @@ -21,6 +21,7 @@ import ( "go.podman.io/image/v5/types" "go.podman.io/storage/pkg/archive" "go.podman.io/storage/pkg/ioutils" + supportedDigests "go.podman.io/storage/pkg/supported-digests" ) type blobCacheDestination struct { @@ -92,7 +93,7 @@ func (d *blobCacheDestination) saveStream(wg *sync.WaitGroup, decompressReader i } }() - digester := digest.Canonical.Digester() + digester := supportedDigests.TmpDigestForNewObjects().Digester() if err := func() error { // A scope for defer defer tempFile.Close() diff --git a/image/pkg/digestvalidation/digest_validation.go b/image/pkg/digestvalidation/digest_validation.go new file mode 100644 index 0000000000..130762daba --- /dev/null +++ b/image/pkg/digestvalidation/digest_validation.go @@ -0,0 +1,50 @@ +package digestvalidation + +import ( + "fmt" + + "github.com/opencontainers/go-digest" + supportedDigests "go.podman.io/storage/pkg/supported-digests" +) + +// ValidateBlobAgainstDigest validates that the provided blob matches the expected digest. +// It performs comprehensive validation to prevent panics from malformed digests or unsupported algorithms. +// +// This function handles the following validation steps: +// 1. Empty digest check +// 2. Digest format validation using digest.Parse() +// 3. Algorithm validation +// 4. Algorithm support validation using supported-digests package +// 5. 
Content validation by computing and comparing digests +// +// Returns an error if any validation step fails, with specific error messages for different failure cases. +func ValidateBlobAgainstDigest(blob []byte, expectedDigest digest.Digest) error { + // Validate the digest format to prevent panics from invalid digests + if expectedDigest == "" { + return fmt.Errorf("expected digest is empty") + } + + // Parse the digest to validate its format before calling Algorithm() + parsedDigest, err := digest.Parse(expectedDigest.String()) + if err != nil { + return fmt.Errorf("invalid digest format: %s", expectedDigest) + } + + algorithm := parsedDigest.Algorithm() + if algorithm == "" { + return fmt.Errorf("invalid digest algorithm: %s", expectedDigest) + } + + // Validate that the algorithm is supported to prevent panics from FromBytes + if !supportedDigests.IsSupportedDigestAlgorithm(algorithm) { + return fmt.Errorf("unsupported digest algorithm: %s (supported: %v)", algorithm, supportedDigests.GetSupportedDigestAlgorithms()) + } + + // Compute the actual digest of the blob + computedDigest := algorithm.FromBytes(blob) + if computedDigest != expectedDigest { + return fmt.Errorf("blob digest mismatch: expected %s, got %s", expectedDigest, computedDigest) + } + + return nil +} diff --git a/image/pkg/digestvalidation/digest_validation_test.go b/image/pkg/digestvalidation/digest_validation_test.go new file mode 100644 index 0000000000..9ef18b5657 --- /dev/null +++ b/image/pkg/digestvalidation/digest_validation_test.go @@ -0,0 +1,137 @@ +package digestvalidation + +import ( + "testing" + + "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var testData = []byte("test data for digest validation") + +func TestValidateBlobAgainstDigest(t *testing.T) { + tests := []struct { + name string + blob []byte + expectedDigest digest.Digest + expectError bool + errorContains string + }{ + { + name: "valid SHA256 
digest", + blob: testData, + expectedDigest: digest.SHA256.FromBytes(testData), + expectError: false, + }, + { + name: "valid SHA512 digest", + blob: testData, + expectedDigest: digest.SHA512.FromBytes(testData), + expectError: false, + }, + { + name: "empty digest", + blob: testData, + expectedDigest: digest.Digest(""), + expectError: true, + errorContains: "expected digest is empty", + }, + { + name: "malformed digest format", + blob: testData, + expectedDigest: digest.Digest("invalid-format"), + expectError: true, + errorContains: "invalid digest format", + }, + { + name: "digest with invalid algorithm", + blob: testData, + expectedDigest: digest.Digest("invalid:1234567890abcdef"), + expectError: true, + errorContains: "invalid digest format", + }, + { + name: "unsupported algorithm (SHA384)", + blob: testData, + expectedDigest: digest.Digest("sha384:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), + expectError: true, + errorContains: "unsupported digest algorithm", + }, + { + name: "unsupported algorithm (MD5)", + blob: testData, + expectedDigest: digest.Digest("md5:d41d8cd98f00b204e9800998ecf8427e"), + expectError: true, + errorContains: "", // Any error is acceptable for unsupported algorithms + }, + { + name: "digest mismatch", + blob: testData, + expectedDigest: digest.SHA256.FromBytes([]byte("different data")), + expectError: true, + errorContains: "blob digest mismatch", + }, + { + name: "empty blob with valid digest", + blob: []byte{}, + expectedDigest: digest.SHA256.FromBytes([]byte{}), + expectError: false, + }, + { + name: "empty blob with wrong digest", + blob: []byte{}, + expectedDigest: digest.SHA256.FromBytes(testData), + expectError: true, + errorContains: "blob digest mismatch", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := ValidateBlobAgainstDigest(tt.blob, tt.expectedDigest) + + if tt.expectError { + require.Error(t, err) + if tt.errorContains != "" { 
+ assert.Contains(t, err.Error(), tt.errorContains) + } + } else { + require.NoError(t, err) + } + }) + } +} + +func TestValidateBlobAgainstDigest_EdgeCases(t *testing.T) { + t.Run("very large blob", func(t *testing.T) { + // Create a larger blob to test with more substantial data + largeData := make([]byte, 1024*1024) // 1MB + for i := range largeData { + largeData[i] = byte(i % 256) + } + + expectedDigest := digest.SHA256.FromBytes(largeData) + err := ValidateBlobAgainstDigest(largeData, expectedDigest) + require.NoError(t, err) + }) + + t.Run("SHA512 with large blob", func(t *testing.T) { + largeData := make([]byte, 1024*1024) // 1MB + for i := range largeData { + largeData[i] = byte(i % 256) + } + + expectedDigest := digest.SHA512.FromBytes(largeData) + err := ValidateBlobAgainstDigest(largeData, expectedDigest) + require.NoError(t, err) + }) + + t.Run("algorithm case sensitivity", func(t *testing.T) { + // Test that algorithm names are case-insensitive (if supported by go-digest) + // This tests the robustness of our validation + expectedDigest := digest.SHA256.FromBytes(testData) + err := ValidateBlobAgainstDigest(testData, expectedDigest) + require.NoError(t, err) + }) +} diff --git a/image/storage/storage_dest.go b/image/storage/storage_dest.go index 5909570007..bef4e3c0c9 100644 --- a/image/storage/storage_dest.go +++ b/image/storage/storage_dest.go @@ -37,6 +37,7 @@ import ( "go.podman.io/storage/pkg/chunked" "go.podman.io/storage/pkg/chunked/toc" "go.podman.io/storage/pkg/ioutils" + supportedDigests "go.podman.io/storage/pkg/supported-digests" ) var ( @@ -289,7 +290,7 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf } defer decompressed.Close() - diffID := digest.Canonical.Digester() + diffID := supportedDigests.TmpDigestForNewObjects().Digester() // Copy the data to the file. // TODO: This can take quite some time, and should ideally be cancellable using context.Context. 
_, err = io.Copy(diffID.Hash(), decompressed) @@ -1033,11 +1034,19 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si return false, err } } else if trusted.diffID != untrustedDiffID { - return false, fmt.Errorf("layer %d (blob %s) does not match config's DiffID %q", index, trusted.logString(), untrustedDiffID) + // If the algorithms don't match, we need to handle this carefully + if trusted.diffID.Algorithm() != untrustedDiffID.Algorithm() { + // This is a critical security check - we cannot allow algorithm mismatches + // without proper validation. For now, we'll reject the layer to maintain security. + return false, fmt.Errorf("layer %d diffID algorithm mismatch: trusted=%s, config=%s - this indicates a potential security issue", + index, trusted.diffID.Algorithm(), untrustedDiffID.Algorithm()) + } else { + return false, fmt.Errorf("layer %d (blob %s) does not match config's DiffID %q", index, trusted.logString(), untrustedDiffID) + } } } - id := layerID(parentLayer, trusted) + id := layerID(parentLayer, trusted, supportedDigests.TmpDigestForNewObjects()) if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { // There's already a layer that should have the right contents, just reuse it. 
@@ -1056,8 +1065,8 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
 	return false, nil
 }
 
-// layerID computes a layer (“chain”) ID for (a possibly-empty parentID, trusted)
-func layerID(parentID string, trusted trustedLayerIdentityData) string {
+// layerID computes a layer ("chain") ID for (a possibly-empty parentID, trusted) using the supplied digest algorithm.
+func layerID(parentID string, trusted trustedLayerIdentityData, algorithm digest.Algorithm) string {
 	var component string
 	mustHash := false
 	if trusted.layerIdentifiedByTOC {
@@ -1072,7 +1081,7 @@
 	if parentID == "" && !mustHash {
 		return component
 	}
-	return digest.Canonical.FromString(parentID + "+" + component).Encoded()
+	return algorithm.FromString(parentID + "+" + component).Encoded()
 }
 
 // createNewLayer creates a new layer newLayerID for (index, trusted) on top of parentLayer (which may be "").
@@ -1490,16 +1499,15 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options
 		imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
 			Key:    s.lockProtected.configDigest.String(),
 			Data:   v,
-			Digest: digest.Canonical.FromBytes(v),
+			Digest: supportedDigests.TmpDigestForNewObjects().FromBytes(v),
 		})
 	}
 
 	// Set up to save the options.UnparsedToplevel's manifest if it differs from
 	// the per-platform one, which is saved below.
 	if !bytes.Equal(toplevelManifest, s.manifest) {
-		manifestDigest, err := manifest.Digest(toplevelManifest)
-		if err != nil {
-			return fmt.Errorf("digesting top-level manifest: %w", err)
-		}
+		// NOTE(review): unlike manifest.Digest, FromBytes neither strips schema1 signatures nor reports errors — confirm schema1 cannot reach this path
+		algorithm := supportedDigests.TmpDigestForNewObjects()
+		manifestDigest := algorithm.FromBytes(toplevelManifest)
 		key, err := manifestBigDataKey(manifestDigest)
 		if err != nil {
 			return err
@@ -1532,7 +1540,7 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options
 		imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
 			Key:    "signatures",
 			Data:   s.signatures,
-			Digest: digest.Canonical.FromBytes(s.signatures),
+			Digest: supportedDigests.TmpDigestForNewObjects().FromBytes(s.signatures),
 		})
 	}
 	for instanceDigest, signatures := range s.signatureses {
@@ -1543,7 +1551,7 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options
 		imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
 			Key:    key,
 			Data:   signatures,
-			Digest: digest.Canonical.FromBytes(signatures),
+			Digest: supportedDigests.TmpDigestForNewObjects().FromBytes(signatures),
 		})
 	}
 
@@ -1586,8 +1594,13 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options
 		// sizes (tracked in the metadata) which might have already
 		// been present with new values, when ideally we'd find a way
 		// to merge them since they all apply to the same image
+		// NOTE(review): this wrapper only forwards to manifest.Digest (schema1 signature stripping); it does NOT apply the configured algorithm
+		digestFunc := func(data []byte) (digest.Digest, error) {
+			// Use manifest.Digest to handle schema1 signature stripping properly
+			return manifest.Digest(data)
+		}
 		for _, data := range imgOptions.BigData {
-			if err := s.imageRef.transport.store.SetImageBigData(img.ID, data.Key, data.Data, manifest.Digest); err != nil {
+			if err := s.imageRef.transport.store.SetImageBigData(img.ID, data.Key, data.Data, digestFunc); err != nil {
 				logrus.Debugf("error saving big data %q for image %q: %v", data.Key, img.ID, err)
 				return fmt.Errorf("saving big data %q for image %q: %w", data.Key, img.ID, err)
 			}
@@ -1645,10 +1658,11 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options
 
 // PutManifest writes the manifest to the destination.
 func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte, instanceDigest *digest.Digest) error {
-	digest, err := manifest.Digest(manifestBlob)
-	if err != nil {
-		return err
-	}
+	// NOTE(review): FromBytes drops manifest.Digest's schema1 signature stripping and error check; local `digest` also shadows the digest package
+	algorithm := supportedDigests.TmpDigestForNewObjects()
+	logrus.Debugf("PutManifest: Computing manifest digest using algorithm: %s", algorithm.String())
+	digest := algorithm.FromBytes(manifestBlob)
+	logrus.Debugf("PutManifest: Computed manifest digest: %s", digest.String())
 	s.manifest = bytes.Clone(manifestBlob)
 	if s.manifest == nil { // Make sure PutManifest can never succeed with s.manifest == nil
 		s.manifest = []byte{}
diff --git a/image/storage/storage_dest_test.go b/image/storage/storage_dest_test.go
index 93b60e6909..40dee650cf 100644
--- a/image/storage/storage_dest_test.go
+++ b/image/storage/storage_dest_test.go
@@ -91,7 +91,7 @@ func TestLayerID(t *testing.T) {
 			diffID:     diffID,
 			tocDigest:  tocDigest,
 			blobDigest: "",
-		})
+		}, digest.Canonical)
 		assert.Equal(t, c.expected, res)
 		// blobDigest does not affect the layer ID
 		res = layerID(c.parentID, trustedLayerIdentityData{
@@ -99,7 +99,7 @@ func TestLayerID(t *testing.T) {
 			diffID:     diffID,
 			tocDigest:  tocDigest,
 			blobDigest: blobDigest,
-		})
+		}, digest.Canonical)
 		assert.Equal(t, c.expected, res)
 	}
 }
diff --git a/image/storage/storage_reference.go b/image/storage/storage_reference.go
index dacffeef78..55cc4ed2cb 100644
--- a/image/storage/storage_reference.go
+++ b/image/storage/storage_reference.go
@@ -35,7 +35,7 @@ func newReference(transport storageTransport, named reference.Named, id string)
 		return nil,
fmt.Errorf("reference %s has neither a tag nor a digest: %w", named.String(), ErrInvalidReference)
 	}
 	if id != "" {
-		if err := validateImageID(id); err != nil {
+		if err := ValidateImageID(id); err != nil {
 			return nil, fmt.Errorf("invalid ID value %q: %v: %w", id, err.Error(), ErrInvalidReference)
 		}
 	}
diff --git a/image/storage/storage_transport.go b/image/storage/storage_transport.go
index 2f0a18787c..6b3bcd17ba 100644
--- a/image/storage/storage_transport.go
+++ b/image/storage/storage_transport.go
@@ -15,6 +15,7 @@ import (
 	"go.podman.io/image/v5/types"
 	"go.podman.io/storage"
 	"go.podman.io/storage/pkg/idtools"
+	supportedDigests "go.podman.io/storage/pkg/supported-digests"
 )
 
 const (
@@ -156,7 +157,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
 		// If it looks like a digest, leave it alone for now.
 		if _, err := digest.Parse(possibleID); err != nil {
 			// Otherwise…
-			if err := validateImageID(possibleID); err == nil {
+			if err := ValidateImageID(possibleID); err == nil {
 				id = possibleID // … it is a full ID
 			} else if img, err := store.Image(possibleID); err == nil && img != nil && len(possibleID) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, possibleID) {
 				// … it is a truncated version of the ID of an image that's present in local storage,
@@ -385,7 +386,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
 	switch len(fields) {
 	case 1: // name only
 	case 2: // name:tag@ID or name[:tag]@digest
-		if idErr := validateImageID(fields[1]); idErr != nil {
+		if idErr := ValidateImageID(fields[1]); idErr != nil {
 			if _, digestErr := digest.Parse(fields[1]); digestErr != nil {
 				return fmt.Errorf("%v is neither a valid digest(%s) nor a valid ID(%s)", fields[1], digestErr.Error(), idErr.Error())
 			}
@@ -394,7 +395,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
 		if _, err := digest.Parse(fields[1]); err != nil {
 			return err
 		}
-		if err := validateImageID(fields[2]); err != nil {
+		if err := ValidateImageID(fields[2]); err != nil {
 			return err
 		}
 	default: // Coverage: This should never happen
@@ -407,8 +408,34 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
 	return nil
 }
 
-// validateImageID returns nil if id is a valid (full) image ID, or an error
-func validateImageID(id string) error {
-	_, err := digest.Parse("sha256:" + id)
-	return err
+// ValidateImageID returns nil if id is a valid (full) image ID, or an error
+func ValidateImageID(id string) error {
+	// Get all supported algorithms dynamically
+	supportedAlgorithms := supportedDigests.GetSupportedDigestAlgorithms()
+
+	// Try each supported algorithm based on the ID length
+	for _, algorithm := range supportedAlgorithms {
+		expectedLength, supported := supportedDigests.GetDigestAlgorithmExpectedLength(algorithm)
+		if !supported {
+			// Skip algorithms we don't know how to handle yet
+			continue
+		}
+
+		if len(id) == expectedLength {
+			_, err := digest.Parse(algorithm.String() + ":" + id)
+			return err
+		}
+	}
+
+	// Invalid length - build error message with supported lengths
+	var supportedLengths []string
+	for _, algorithm := range supportedAlgorithms {
+		if expectedLength, supported := supportedDigests.GetDigestAlgorithmExpectedLength(algorithm); supported {
+			algorithmName := supportedDigests.GetDigestAlgorithmName(algorithm)
+			supportedLengths = append(supportedLengths, fmt.Sprintf("%d (%s)", expectedLength, algorithmName))
+		}
+	}
+
+	return fmt.Errorf("invalid image ID length: expected %s characters, got %d",
+		strings.Join(supportedLengths, " or "), len(id))
 }