diff --git a/deployment/build-and-stage.yaml b/deployment/build-and-stage.yaml
index 43a7b2aed8a..35f03e9fa14 100644
--- a/deployment/build-and-stage.yaml
+++ b/deployment/build-and-stage.yaml
@@ -132,6 +132,20 @@ steps:
   args: ['push', '--all-tags', 'gcr.io/oss-vdb/relations']
   waitFor: ['build-relations', 'cloud-build-queue']
 
+- name: 'gcr.io/cloud-builders/docker'
+  entrypoint: 'bash'
+  args: ['-c', 'docker pull gcr.io/oss-vdb/generatesitemap:latest || exit 0']
+  id: 'pull-generatesitemap'
+  waitFor: ['setup']
+- name: gcr.io/cloud-builders/docker
+  args: ['build', '-t', 'gcr.io/oss-vdb/generatesitemap:latest', '-t', 'gcr.io/oss-vdb/generatesitemap:$COMMIT_SHA', '-f', 'cmd/generatesitemap/Dockerfile', '--cache-from', 'gcr.io/oss-vdb/generatesitemap:latest', '--pull', '.']
+  dir: 'go'
+  id: 'build-generatesitemap'
+  waitFor: ['pull-generatesitemap']
+- name: gcr.io/cloud-builders/docker
+  args: ['push', '--all-tags', 'gcr.io/oss-vdb/generatesitemap']
+  waitFor: ['build-generatesitemap', 'cloud-build-queue']
+
 - name: 'gcr.io/cloud-builders/docker'
   entrypoint: 'bash'
   args: ['-c', 'docker pull gcr.io/oss-vdb/custommetrics:latest || exit 0']
@@ -372,6 +386,7 @@ steps:
     record-checker=gcr.io/oss-vdb/record-checker:$COMMIT_SHA,\
     custommetrics=gcr.io/oss-vdb/custommetrics:$COMMIT_SHA,\
     relations=gcr.io/oss-vdb/relations:$COMMIT_SHA,\
+    generatesitemap=gcr.io/oss-vdb/generatesitemap:$COMMIT_SHA,\
     gitter=gcr.io/oss-vdb/gitter:$COMMIT_SHA"
   ]
   dir: deployment/clouddeploy/gke-workers
@@ -432,4 +447,5 @@ images:
 - 'gcr.io/oss-vdb/record-checker:$COMMIT_SHA'
 - 'gcr.io/oss-vdb/custommetrics:$COMMIT_SHA'
 - 'gcr.io/oss-vdb/relations:$COMMIT_SHA'
+- 'gcr.io/oss-vdb/generatesitemap:$COMMIT_SHA'
 - 'gcr.io/oss-vdb/gitter:$COMMIT_SHA'
diff --git a/deployment/clouddeploy/gke-workers/base/generate-sitemap.yaml b/deployment/clouddeploy/gke-workers/base/generate-sitemap.yaml
index 442485dc7c3..c5154a81b75 100644
--- a/deployment/clouddeploy/gke-workers/base/generate-sitemap.yaml
+++ b/deployment/clouddeploy/gke-workers/base/generate-sitemap.yaml
@@ -13,9 +13,8 @@ spec:
     spec:
       containers:
       - name: generate-sitemap-cron
-        image: cron
+        image: generatesitemap
         imagePullPolicy: Always
-        command: ["/usr/local/bin/generate_sitemap/generate_and_upload.sh"]
         resources:
           requests:
             cpu: "1"
diff --git a/deployment/clouddeploy/gke-workers/environments/oss-vdb-test/generate-sitemap.yaml b/deployment/clouddeploy/gke-workers/environments/oss-vdb-test/generate-sitemap.yaml
index 67e129007c3..d8aa136ec40 100644
--- a/deployment/clouddeploy/gke-workers/environments/oss-vdb-test/generate-sitemap.yaml
+++ b/deployment/clouddeploy/gke-workers/environments/oss-vdb-test/generate-sitemap.yaml
@@ -10,9 +10,11 @@ spec:
       containers:
       - name: generate-sitemap-cron
         env:
-        - name: BASE_URL
-          value: "https://test.osv.dev"
         - name: GOOGLE_CLOUD_PROJECT
           value: oss-vdb-test
-        - name: OUTPUT_GCS_BUCKET
-          value: test-osv-dev-sitemap
+        args:
+        - "--base-url=https://test.osv.dev"
+        - "--osv-vulns-bucket=osv-test-vulnerabilities"
+        - "--upload-to-gcs=true"
+        - "--bucket=test-osv-dev-sitemap"
+
diff --git a/deployment/clouddeploy/gke-workers/environments/oss-vdb/generate-sitemap.yaml b/deployment/clouddeploy/gke-workers/environments/oss-vdb/generate-sitemap.yaml
index d8ba20f1706..ed1b0071674 100644
--- a/deployment/clouddeploy/gke-workers/environments/oss-vdb/generate-sitemap.yaml
+++ b/deployment/clouddeploy/gke-workers/environments/oss-vdb/generate-sitemap.yaml
@@ -10,9 +10,11 @@ spec:
       containers:
      - name: generate-sitemap-cron
         env:
-        - name: BASE_URL
-          value: "https://osv.dev"
         - name: GOOGLE_CLOUD_PROJECT
           value: oss-vdb
-        - name: OUTPUT_GCS_BUCKET
-          value: osv-dev-sitemap
+        args:
+        - "--base-url=https://osv.dev"
+        - "--osv-vulns-bucket=osv-vulnerabilities"
+        - "--upload-to-gcs=true"
+        - "--bucket=osv-dev-sitemap"
+
diff --git a/go/cmd/generatesitemap/Dockerfile b/go/cmd/generatesitemap/Dockerfile
new file mode 100644
index 00000000000..626c8cd0ae4
--- /dev/null
+++ b/go/cmd/generatesitemap/Dockerfile
@@ -0,0 +1,31 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
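+# Build stage: compile generatesitemap as a static binary (CGO disabled) so it
+# can run in the distroless runtime image below.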
"https://osv.dev" - name: GOOGLE_CLOUD_PROJECT value: oss-vdb - - name: OUTPUT_GCS_BUCKET - value: osv-dev-sitemap + args: + - "--base-url=https://osv.dev" + - "--osv-vulns-bucket=osv-vulnerabilities" + - "--upload-to-gcs=true" + - "--bucket=osv-dev-sitemap" + diff --git a/go/cmd/generatesitemap/Dockerfile b/go/cmd/generatesitemap/Dockerfile new file mode 100644 index 00000000000..626c8cd0ae4 --- /dev/null +++ b/go/cmd/generatesitemap/Dockerfile @@ -0,0 +1,31 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM golang:1.25.5-alpine@sha256:3587db7cc96576822c606d119729370dbf581931c5f43ac6d3fa03ab4ed85a10 AS build + +WORKDIR /src + +COPY ./go.mod /src/go.mod +COPY ./go.sum /src/go.sum +RUN go mod download && go mod verify + + +COPY ./ /src/ +RUN CGO_ENABLED=0 go build -o generatesitemap ./cmd/generatesitemap + +FROM gcr.io/distroless/static-debian12@sha256:87bce11be0af225e4ca761c40babb06d6d559f5767fbf7dc3c47f0f1a466b92c + +COPY --from=build /src/generatesitemap / + +ENTRYPOINT ["/generatesitemap"] diff --git a/go/cmd/generatesitemap/generatesitemap.go b/go/cmd/generatesitemap/generatesitemap.go new file mode 100644 index 00000000000..49136fcb212 --- /dev/null +++ b/go/cmd/generatesitemap/generatesitemap.go @@ -0,0 +1,351 @@ +// Package main runs the sitemap generator, creating sitemaps from vulnerability data in GCS. +package main + +import ( + "context" + "encoding/xml" + "errors" + "flag" + "fmt" + "hash/crc32" + "log/slog" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + "cloud.google.com/go/storage" + "github.com/google/osv.dev/go/logger" + "github.com/google/osv.dev/go/osv/clients" + "github.com/ossf/osv-schema/bindings/go/osvschema" + "google.golang.org/protobuf/proto" +) + +const ( + gcsProtoPrefix = "all/pb/" + sitemapURLLimit = 49999 + sitemapPrefix = "sitemap_" +) + +// SitemapEntry represents a single URL entry in the sitemap. +type SitemapEntry struct { + XMLName xml.Name `xml:"url"` + Loc string `xml:"loc"` + LastModified string `xml:"lastmod"` +} + +// URLSet represents the root element of a sitemap. +type URLSet struct { + XMLName xml.Name `xml:"urlset"` + XMLNS string `xml:"xmlns,attr"` + URLs []SitemapEntry `xml:"url"` +} + +// SitemapIndexEntry represents a single sitemap entry in the sitemap index. +type SitemapIndexEntry struct { + XMLName xml.Name `xml:"sitemap"` + Loc string `xml:"loc"` + LastModified string `xml:"lastmod"` +} + +// SitemapIndex represents the root element of a sitemap index. +type SitemapIndex struct { + XMLName xml.Name `xml:"sitemapindex"` + XMLNS string `xml:"xmlns,attr"` + Sitemaps []SitemapIndexEntry `xml:"sitemap"` +} + +// Entry holds the minimal data needed for sitemap generation. 
+func main() {
+	logger.InitGlobalLogger()
+
+	baseURL := flag.String("base-url", "https://osv.dev", "The base URL for the sitemap entries (without trailing /).")
+	vulnBucketName := flag.String("osv-vulns-bucket", os.Getenv("OSV_VULNERABILITIES_BUCKET"), "GCS bucket to read vulnerability protobufs from.")
+	outputDir := flag.String("bucket", "sitemap_output", "Output bucket or directory name. If -upload-to-gcs is true, this is a GCS bucket name; otherwise, it's a local directory.")
+	uploadToGCS := flag.Bool("upload-to-gcs", false, "If true, writes the output to a GCS bucket specified by -bucket.")
+	numWorkers := flag.Int("workers", 200, "The total number of concurrent workers to use.")
+
+	flag.Parse()
+
+	if *vulnBucketName == "" {
+		logger.Fatal("OSV_VULNERABILITIES_BUCKET must be set")
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	storageClient, err := storage.NewClient(ctx)
+	if err != nil {
+		logger.Fatal("failed to create storage client", slog.Any("err", err))
+	}
+	defer storageClient.Close()
+
+	vulnClient := clients.NewGCSClient(storageClient, *vulnBucketName)
+
+	var outClient clients.CloudStorage
+	if *uploadToGCS {
+		outClient = clients.NewGCSClient(storageClient, *outputDir)
+	} else {
+		if err := os.MkdirAll(*outputDir, 0755); err != nil {
+			logger.Fatal("failed to create output directory", slog.Any("err", err))
+		}
+	}
+
+	// Channel for GCS paths
+	gcsPathCh := make(chan string)
+	// Channel for parsed entries
+	entryCh := make(chan *osvschema.Vulnerability)
+
+	// Start listing objects for workers to consume
+	go func() {
+		defer close(gcsPathCh)
+		if err := listObjects(ctx, vulnClient, gcsPathCh); err != nil {
+			logger.Fatal("failed to list objects", slog.Any("err", err))
+		}
+	}()
+
+	var wg sync.WaitGroup
+	// Start workers
+	for range *numWorkers {
+		wg.Go(func() { downloader(ctx, vulnClient, gcsPathCh, entryCh) })
+	}
+
+	// Aggregate entries by ecosystem
+	ecosystemEntries := make(map[string][]Entry)
+
+	// Close the entry channel once all download workers have finished.
+	go func() {
+		wg.Wait()
+		close(entryCh)
+	}()
+
+	count := 0
+	for vuln := range entryCh {
+		count++
+		lastMod := time.Unix(0, 0).UTC()
+		if vuln.GetModified() != nil {
+			lastMod = vuln.GetModified().AsTime().UTC()
+		}
+
+		// Collect ecosystems
+		ecosystems := make(map[string]struct{})
+		for _, affected := range vuln.GetAffected() {
+			if affected.GetPackage() != nil && affected.GetPackage().GetEcosystem() != "" {
+				eco, _, _ := strings.Cut(affected.GetPackage().GetEcosystem(), ":")
+				ecosystems[eco] = struct{}{}
+			}
+			// Check for GIT ranges
+			for _, r := range affected.GetRanges() {
+				if r.GetType() == osvschema.Range_GIT {
+					ecosystems["GIT"] = struct{}{}
+				}
+			}
+		}
+		if len(ecosystems) == 0 {
+			ecosystems["[EMPTY]"] = struct{}{}
+		}
+
+		entry := Entry{
+			ID:           vuln.GetId(),
+			LastModified: lastMod,
+		}
+
+		for eco := range ecosystems {
+			ecosystemEntries[eco] = append(ecosystemEntries[eco], entry)
+		}
+	}
+
+	logger.Info("processed vulnerabilities", slog.Int("count", count))
+
+	// Generate sitemaps
+	if err := generateSitemaps(ctx, outClient, *outputDir, *baseURL, ecosystemEntries); err != nil {
+		logger.Fatal("failed to generate sitemaps", slog.Any("err", err))
+	}
+
+	logger.Info("sitemap generation complete")
+}
+
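+// listObjects streams every object name under gcsProtoPrefix into outCh,
+// logging progress roughly once per vulnerability ID prefix.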
+func listObjects(ctx context.Context, client clients.CloudStorage, outCh chan<- string) error {
+	prevPrefix := ""
+	for name, err := range client.Objects(ctx, gcsProtoPrefix) {
+		if err != nil {
+			return err
+		}
+		// Only log when we see a new ID prefix (i.e. roughly once per data source)
+		prefix := filepath.Base(name)
+		prefix, _, _ = strings.Cut(prefix, "-")
+		if prefix != prevPrefix {
+			logger.Info("iterating vulnerabilities", slog.String("now_at", name))
+			prevPrefix = prefix
+		}
+		select {
+		case outCh <- name:
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+	}
+
+	return nil
+}
+
+// downloader reads each GCS path from inCh, parses the object as an OSV
+// vulnerability protobuf, and sends the result to outCh.
+func downloader(ctx context.Context, client clients.CloudStorage, inCh <-chan string, outCh chan<- *osvschema.Vulnerability) {
+	for path := range inCh {
+		// Download and parse
+		content, err := client.ReadObject(ctx, path)
+		if err != nil {
+			logger.Error("failed to read content", slog.String("path", path), slog.Any("err", err))
+			continue
+		}
+
+		var vuln osvschema.Vulnerability
+		if err := proto.Unmarshal(content, &vuln); err != nil {
+			logger.Error("failed to unmarshal protobuf", slog.String("path", path), slog.Any("err", err))
+			continue
+		}
+
+		select {
+		case outCh <- &vuln:
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+func generateSitemaps(ctx context.Context, client clients.CloudStorage, outputDir, baseURL string, entries map[string][]Entry) error {
+	var sitemapIndexEntries []SitemapIndexEntry
+
+	// Sort ecosystems for deterministic output
+	ecosystems := make([]string, 0, len(entries))
+	for eco := range entries {
+		ecosystems = append(ecosystems, eco)
+	}
+	sort.Strings(ecosystems)
+
+	for _, eco := range ecosystems {
+		vulns := entries[eco]
+		// Sort by LastModified descending
+		sort.Slice(vulns, func(i, j int) bool {
+			return vulns[i].LastModified.After(vulns[j].LastModified)
+		})
+
+		// Split into chunks
+		chunks := chunkEntries(vulns, sitemapURLLimit)
+
+		sanitizedEco := sanitizeEcosystem(eco)
+
+		for i, chunk := range chunks {
+			filename := fmt.Sprintf("%s%s.xml", sitemapPrefix, sanitizedEco)
+			if len(chunks) > 1 {
+				filename = fmt.Sprintf("%s%s_%d.xml", sitemapPrefix, sanitizedEco, i+1)
+			}
+
+			path := filename
+			if client == nil {
+				path = filepath.Join(outputDir, filename)
+			}
+
+			if err := writeSitemap(ctx, client, path, baseURL, chunk); err != nil {
+				return err
+			}
+
+			// Add to index
+			chunkLastMod := time.Unix(0, 0).UTC()
+			if len(chunk) > 0 {
+				chunkLastMod = chunk[0].LastModified
+			}
+
+			sitemapIndexEntries = append(sitemapIndexEntries, SitemapIndexEntry{
+				Loc:          fmt.Sprintf("%s/%s", baseURL, filename),
+				LastModified: chunkLastMod.Format(time.RFC3339),
+			})
+		}
+	}
+
+	// Write sitemap index
+	filename := sitemapPrefix + "index.xml"
+	path := filename
+	if client == nil {
+		path = filepath.Join(outputDir, filename)
+	}
+
+	return writeSitemapIndex(ctx, client, path, sitemapIndexEntries)
+}
+
+// chunkEntries splits entries into consecutive slices of at most limit entries each.
+func chunkEntries(entries []Entry, limit int) [][]Entry {
+	var chunks [][]Entry
+	for i := 0; i < len(entries); i += limit {
+		end := min(i+limit, len(entries))
+		chunks = append(chunks, entries[i:end])
+	}
+
+	return chunks
+}
+
+// sanitizeEcosystem converts an ecosystem name into a filename-safe form.
+func sanitizeEcosystem(eco string) string {
+	s := strings.TrimSpace(eco)
+	s = strings.ReplaceAll(s, " ", "_")
+	s = strings.ReplaceAll(s, ".", "__")
+
+	return s
+}
+
+func writeSitemap(ctx context.Context, client clients.CloudStorage, path, baseURL string, entries []Entry) error {
+	urlSet := URLSet{
+		XMLNS: "http://www.sitemaps.org/schemas/sitemap/0.9",
+	}
+
+	for _, e := range entries {
+		urlSet.URLs = append(urlSet.URLs, SitemapEntry{
+			Loc:          fmt.Sprintf("%s/vulnerability/%s", baseURL, e.ID),
+			LastModified: e.LastModified.Format(time.RFC3339),
+		})
+	}
+
+	return writeSitemapFile(ctx, client, path, urlSet)
+}
+
+func writeSitemapIndex(ctx context.Context, client clients.CloudStorage, path string, entries []SitemapIndexEntry) error {
+	index := SitemapIndex{
+		XMLNS:    "http://www.sitemaps.org/schemas/sitemap/0.9",
+		Sitemaps: entries,
+	}
+
+	return writeSitemapFile(ctx, client, path, index)
+}
+
+// crc32Table uses the Castagnoli CRC32 polynomial for checksums to match GCS.
+var crc32Table = crc32.MakeTable(crc32.Castagnoli)
+
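+// writeSitemapFile serializes v as indented XML and writes it to the local
+// filesystem (when client is nil) or to GCS, skipping the upload when the
+// existing object's CRC32C checksum already matches.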
+func writeSitemapFile(ctx context.Context, client clients.CloudStorage, path string, v any) error {
+	data, err := xml.MarshalIndent(v, "", "  ")
+	if err != nil {
+		return err
+	}
+	data = append([]byte(xml.Header), data...)
+
+	if client == nil {
+		return os.WriteFile(path, data, 0600)
+	}
+
+	attrs, err := client.ReadObjectAttrs(ctx, path)
+	if err == nil {
+		checksum := crc32.Checksum(data, crc32Table)
+		if checksum == attrs.CRC32C {
+			logger.Info("skipping upload since checksum is unchanged",
+				slog.String("path", path), slog.Any("crc32c", checksum))
+
+			return nil
+		}
+	} else if !errors.Is(err, clients.ErrNotFound) {
+		return fmt.Errorf("failed getting checksum: %w", err)
+	}
+
+	return client.WriteObject(ctx, path, data, nil)
+}
diff --git a/go/cmd/generatesitemap/generatesitemap_test.go b/go/cmd/generatesitemap/generatesitemap_test.go
new file mode 100644
index 00000000000..3885f8b637a
--- /dev/null
+++ b/go/cmd/generatesitemap/generatesitemap_test.go
@@ -0,0 +1,148 @@
+package main
+
+import (
+	"context"
+	"encoding/xml"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+	"time"
+)
+
+func TestSanitizeEcosystem(t *testing.T) {
+	tests := []struct {
+		input    string
+		expected string
+	}{
+		{"Go", "Go"},
+		{"Rocky Linux", "Rocky_Linux"},
+		{"crates.io", "crates__io"},
+		{" spaced ", "spaced"},
+	}
+
+	for _, test := range tests {
+		got := sanitizeEcosystem(test.input)
+		if got != test.expected {
+			t.Errorf("sanitizeEcosystem(%q) = %q, want %q", test.input, got, test.expected)
+		}
+	}
+}
+
+func TestChunkEntries(t *testing.T) {
+	entries := make([]Entry, 105)
+	for i := range 105 {
+		entries[i] = Entry{ID: "id"}
+	}
+
+	chunks := chunkEntries(entries, 50)
+	if len(chunks) != 3 {
+		t.Errorf("expected 3 chunks, got %d", len(chunks))
+	}
+	if len(chunks[0]) != 50 {
+		t.Errorf("expected chunk 0 to have 50 entries, got %d", len(chunks[0]))
+	}
+	if len(chunks[1]) != 50 {
+		t.Errorf("expected chunk 1 to have 50 entries, got %d", len(chunks[1]))
+	}
+	if len(chunks[2]) != 5 {
+		t.Errorf("expected chunk 2 to have 5 entries, got %d", len(chunks[2]))
+	}
+}
+
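+// TestGenerateSitemaps runs the full generation path against a local
+// directory, covering per-ecosystem files, chunk splitting, and the index.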
+func TestGenerateSitemaps(t *testing.T) {
+	tmpDir := t.TempDir()
+
+	entries := map[string][]Entry{
+		"Go": {
+			{ID: "GO-1", LastModified: time.Unix(1000, 0).UTC()},
+			{ID: "GO-2", LastModified: time.Unix(2000, 0).UTC()},
+		},
+		"Rocky Linux": {
+			{ID: "RL-1", LastModified: time.Unix(3000, 0).UTC()},
+		},
+		"LargeEco": make([]Entry, 60000), // Should split into 2 files (limit is 49999)
+	}
+
+	// Fill LargeEco
+	for i := range 60000 {
+		entries["LargeEco"][i] = Entry{
+			ID:           fmt.Sprintf("LE-%d", i),
+			LastModified: time.Unix(int64(i), 0).UTC(),
+		}
+	}
+
+	err := generateSitemaps(context.Background(), nil, tmpDir, "https://example.com", entries)
+	if err != nil {
+		t.Fatalf("generateSitemaps failed: %v", err)
+	}
+
+	// Check files
+	files, _ := filepath.Glob(filepath.Join(tmpDir, "*.xml"))
+	expectedFiles := []string{
+		"sitemap_Go.xml",
+		"sitemap_Rocky_Linux.xml",
+		"sitemap_LargeEco_1.xml",
+		"sitemap_LargeEco_2.xml",
+		"sitemap_index.xml",
+	}
+
+	fileMap := make(map[string]bool)
+	for _, f := range files {
+		fileMap[filepath.Base(f)] = true
+	}
+
+	for _, f := range expectedFiles {
+		if !fileMap[f] {
+			t.Errorf("expected file %s not found", f)
+		}
+	}
+
+	// Verify content of sitemap_Go.xml
+	content, err := os.ReadFile(filepath.Join(tmpDir, "sitemap_Go.xml"))
+	if err != nil {
+		t.Fatalf("error reading sitemap_Go.xml: %v", err)
+	}
+	var urlSet URLSet
+	if err := xml.Unmarshal(content, &urlSet); err != nil {
+		t.Fatalf("error unmarshalling sitemap_Go.xml: %v", err)
+	}
+	if len(urlSet.URLs) != 2 {
+		t.Errorf("expected 2 URLs in sitemap_Go.xml, got %d", len(urlSet.URLs))
+	}
+	// Verify sorting (descending)
+	if urlSet.URLs[0].Loc != "https://example.com/vulnerability/GO-2" {
+		t.Errorf("expected GO-2 first (newer), got %s", urlSet.URLs[0].Loc)
+	}
+
+	// Verify content of sitemap_index.xml
+	content, err = os.ReadFile(filepath.Join(tmpDir, "sitemap_index.xml"))
+	if err != nil {
+		t.Fatalf("error reading sitemap_index.xml: %v", err)
+	}
+	var index SitemapIndex
+	if err := xml.Unmarshal(content, &index); err != nil {
+		t.Fatalf("error unmarshalling sitemap_index.xml: %v", err)
+	}
+
+	// We expect 4 entries in index: Go, Rocky Linux, LargeEco_1, LargeEco_2
+	if len(index.Sitemaps) != 4 {
+		t.Errorf("expected 4 sitemaps in index, got %d", len(index.Sitemaps))
+	}
+
+	// Check LargeEco entries in index
+	foundLE1 := false
+	foundLE2 := false
+	for _, s := range index.Sitemaps {
+		if strings.HasSuffix(s.Loc, "sitemap_LargeEco_1.xml") {
+			foundLE1 = true
+		}
+		if strings.HasSuffix(s.Loc, "sitemap_LargeEco_2.xml") {
+			foundLE2 = true
+		}
+	}
+	if !foundLE1 || !foundLE2 {
+		t.Errorf("missing LargeEco sitemaps in index")
+	}
+}
diff --git a/go/osv/clients/cloudstorage.go b/go/osv/clients/cloudstorage.go
index d69778a1c8d..62cdb18562e 100644
--- a/go/osv/clients/cloudstorage.go
+++ b/go/osv/clients/cloudstorage.go
@@ -48,6 +48,8 @@ type Attrs struct {
 	Generation int64
 	// CustomTime is the custom time metadata of the object.
 	CustomTime time.Time
+	// CRC32C is the CRC32 checksum of the object, using the Castagnoli polynomial.
+	CRC32C uint32
 }
 
 // CloudStorage defines a generic interface for blob storage operations.
diff --git a/go/osv/clients/gcs_client.go b/go/osv/clients/gcs_client.go
index b302f537828..c5e4f386c5a 100644
--- a/go/osv/clients/gcs_client.go
+++ b/go/osv/clients/gcs_client.go
@@ -71,7 +71,11 @@ func (c *GCSClient) ReadObjectAttrs(ctx context.Context, path string) (*Attrs, e
 		return nil, err
 	}
 
-	return &Attrs{Generation: attrs.Generation, CustomTime: attrs.CustomTime}, nil
+	return &Attrs{
+		Generation: attrs.Generation,
+		CustomTime: attrs.CustomTime,
+		CRC32C:     attrs.CRC32C,
+	}, nil
 }
 
 func (c *GCSClient) WriteObject(ctx context.Context, path string, data []byte, opts *WriteOptions) error {
diff --git a/go/testutils/gcs.go b/go/testutils/gcs.go
index e19fc22d95d..c98af937165 100644
--- a/go/testutils/gcs.go
+++ b/go/testutils/gcs.go
@@ -2,6 +2,7 @@ package testutils
 
 import (
 	"context"
+	"hash/crc32"
 	"iter"
 	"slices"
 	"strings"
@@ -16,8 +17,12 @@ type mockObject struct {
 	data       []byte
 	generation int64
 	customTime time.Time
+	crc32c     uint32
 }
 
+// crc32Table uses the Castagnoli CRC32 polynomial for checksums to match GCS.
+var crc32Table = crc32.MakeTable(crc32.Castagnoli)
+
 // MockStorage implements osv.CloudStorage for testing.
 type MockStorage struct {
 	mu sync.RWMutex
@@ -59,7 +64,11 @@ func (c *MockStorage) ReadObjectAttrs(_ context.Context, path string) (*clients.
 		return nil, clients.ErrNotFound
 	}
 
-	return &clients.Attrs{Generation: obj.generation, CustomTime: obj.customTime}, nil
+	return &clients.Attrs{
+		Generation: obj.generation,
+		CustomTime: obj.customTime,
+		CRC32C:     obj.crc32c,
+	}, nil
 }
 
 func (c *MockStorage) WriteObject(_ context.Context, path string, data []byte, opts *clients.WriteOptions) error {
@@ -99,6 +108,7 @@ func (c *MockStorage) WriteObject(_ context.Context, path string, data []byte, o
 		data:       data,
 		generation: newGeneration,
 		customTime: customTime,
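+		// Compute the checksum at write time so ReadObjectAttrs mirrors real GCS.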
+		crc32c:     crc32.Checksum(data, crc32Table),
 	}
 
 	return nil