diff --git a/changelog/unreleased/change-ocis-s3ng-fs-blob-layout.md b/changelog/unreleased/change-ocis-s3ng-fs-blob-layout.md new file mode 100644 index 0000000000..c02b6d2e49 --- /dev/null +++ b/changelog/unreleased/change-ocis-s3ng-fs-blob-layout.md @@ -0,0 +1,80 @@ +Change: Change the oCIS and S3NG storage driver blob store layout + +We've optimized the oCIS and S3NG storage driver blob store layout. + +For the oCIS storage driver, blobs will now be stored inside the folder +of a space, next to the nodes. This allows admins to easily archive, back up and restore +spaces as a whole with UNIX tooling. We also moved from a single folder for blobs to +multiple folders for blobs, to make the filesystem interactions more performant for +large numbers of files. + +The previous layout on disk looked like this: + +```markdown +|-- spaces +| |-- .. +| | |-- .. +| |-- xx +| |-- xxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx <- partitioned space id +| |-- nodes +| |-- .. +| |-- xx +| |-- xx +| |-- xx +| |-- xx +| |-- -xxxx-xxxx-xxxx-xxxxxxxxxxxx <- partitioned node id +|-- blobs + |-- .. + |-- xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx <- blob id +``` + +Now it looks like this: + +```markdown +|-- spaces +| |-- .. +| | |-- .. + |-- xx + |-- xxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx <- partitioned space id + |-- nodes + | |-- .. + | |-- xx + | |-- xx + | |-- xx + | |-- xx + | |-- -xxxx-xxxx-xxxx-xxxxxxxxxxxx <- partitioned node id + |-- blobs + |-- .. + |-- xx + |-- xx + |-- xx + |-- xx + |-- -xxxx-xxxx-xxxx-xxxxxxxxxxxx <- partitioned blob id +``` + +For the S3NG storage driver, blobs will now be prefixed with the space id and +also a part of the blob id will be used as prefix. This creates a better prefix partitioning +and mitigates S3 API performance drops for large buckets (https://aws.amazon.com/de/premiumsupport/knowledge-center/s3-prefix-nested-folders-difference/). + +The previous S3 bucket (blobs only) looked like this: + +```markdown +|-- .. 
+|-- xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx <- blob id +``` + +Now it looks like this: + +```markdown +|-- .. +|-- xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx <- space id + |-- .. + |-- xx + |-- xx + |-- xx + |-- xx + |-- -xxxx-xxxx-xxxx-xxxxxxxxxxxx <- partitioned blob id +``` + +https://github.com/cs3org/reva/pull/2763 +https://github.com/owncloud/ocis/issues/3557 diff --git a/pkg/storage/fs/ocis/blobstore/blobstore.go b/pkg/storage/fs/ocis/blobstore/blobstore.go index aab9f88ca4..97dc6ca111 100644 --- a/pkg/storage/fs/ocis/blobstore/blobstore.go +++ b/pkg/storage/fs/ocis/blobstore/blobstore.go @@ -24,6 +24,8 @@ import ( "os" "path/filepath" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" "github.com/pkg/errors" ) @@ -45,39 +47,50 @@ func New(root string) (*Blobstore, error) { } // Upload stores some data in the blobstore under the given key -func (bs *Blobstore) Upload(key string, data io.Reader) error { - f, err := os.OpenFile(bs.path(key), os.O_CREATE|os.O_WRONLY, 0700) +func (bs *Blobstore) Upload(node *node.Node, data io.Reader) error { + + // ensure parent path exists + if err := os.MkdirAll(filepath.Dir(bs.path(node)), 0700); err != nil { + return errors.Wrap(err, "Decomposedfs: oCIS blobstore: error creating parent folders for blob") + } + + f, err := os.OpenFile(bs.path(node), os.O_CREATE|os.O_WRONLY, 0700) if err != nil { - return errors.Wrapf(err, "could not open blob '%s' for writing", key) + return errors.Wrapf(err, "could not open blob '%s' for writing", bs.path(node)) } w := bufio.NewWriter(f) _, err = w.ReadFrom(data) if err != nil { - return errors.Wrapf(err, "could not write blob '%s'", key) + return errors.Wrapf(err, "could not write blob '%s'", bs.path(node)) } return w.Flush() } // Download retrieves a blob from the blobstore for reading -func (bs *Blobstore) Download(key string) (io.ReadCloser, error) { - file, err := os.Open(bs.path(key)) +func (bs *Blobstore) 
Download(node *node.Node) (io.ReadCloser, error) { + file, err := os.Open(bs.path(node)) if err != nil { - return nil, errors.Wrapf(err, "could not read blob '%s'", key) + return nil, errors.Wrapf(err, "could not read blob '%s'", bs.path(node)) } return file, nil } // Delete deletes a blob from the blobstore -func (bs *Blobstore) Delete(key string) error { - err := os.Remove(bs.path(key)) +func (bs *Blobstore) Delete(node *node.Node) error { + err := os.Remove(bs.path(node)) if err != nil { - return errors.Wrapf(err, "could not delete blob '%s'", key) + return errors.Wrapf(err, "could not delete blob '%s'", bs.path(node)) } return nil } -func (bs *Blobstore) path(key string) string { - return filepath.Join(bs.root, filepath.Clean(filepath.Join("/", key))) +func (bs *Blobstore) path(node *node.Node) string { + return filepath.Join( + bs.root, + filepath.Clean(filepath.Join( + "/", "spaces", lookup.Pathify(node.SpaceID, 1, 2), "blobs", lookup.Pathify(node.BlobID, 4, 2)), + ), + ) } diff --git a/pkg/storage/fs/ocis/blobstore/blobstore_test.go b/pkg/storage/fs/ocis/blobstore/blobstore_test.go index 24d4f90838..610a13b290 100644 --- a/pkg/storage/fs/ocis/blobstore/blobstore_test.go +++ b/pkg/storage/fs/ocis/blobstore/blobstore_test.go @@ -25,6 +25,7 @@ import ( "path" "github.com/cs3org/reva/v2/pkg/storage/fs/ocis/blobstore" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/v2/tests/helpers" . 
"github.com/onsi/ginkgo/v2" @@ -34,7 +35,7 @@ import ( var _ = Describe("Blobstore", func() { var ( tmpRoot string - key string + blobNode *node.Node blobPath string data []byte @@ -47,10 +48,13 @@ var _ = Describe("Blobstore", func() { Expect(err).ToNot(HaveOccurred()) data = []byte("1234567890") - key = "foo" - blobPath = path.Join(tmpRoot, "blobs", key) + blobNode = &node.Node{ + SpaceID: "wonderfullspace", + BlobID: "huuuuugeblob", + } + blobPath = path.Join(tmpRoot, "spaces", "wo", "nderfullspace", "blobs", "hu", "uu", "uu", "ge", "blob") - bs, err = blobstore.New(path.Join(tmpRoot, "blobs")) + bs, err = blobstore.New(path.Join(tmpRoot)) Expect(err).ToNot(HaveOccurred()) }) @@ -61,13 +65,13 @@ var _ = Describe("Blobstore", func() { }) It("creates the root directory if it doesn't exist", func() { - _, err := os.Stat(path.Join(tmpRoot, "blobs")) + _, err := os.Stat(path.Join(tmpRoot)) Expect(err).ToNot(HaveOccurred()) }) Describe("Upload", func() { It("writes the blob", func() { - err := bs.Upload(key, bytes.NewReader(data)) + err := bs.Upload(blobNode, bytes.NewReader(data)) Expect(err).ToNot(HaveOccurred()) writtenBytes, err := ioutil.ReadFile(blobPath) @@ -78,12 +82,13 @@ var _ = Describe("Blobstore", func() { Context("with an existing blob", func() { BeforeEach(func() { + Expect(os.MkdirAll(path.Dir(blobPath), 0700)).To(Succeed()) Expect(ioutil.WriteFile(blobPath, data, 0700)).To(Succeed()) }) Describe("Download", func() { It("cleans the key", func() { - reader, err := bs.Download("../" + key) + reader, err := bs.Download(blobNode) Expect(err).ToNot(HaveOccurred()) readData, err := ioutil.ReadAll(reader) @@ -92,7 +97,7 @@ var _ = Describe("Blobstore", func() { }) It("returns a reader to the blob", func() { - reader, err := bs.Download(key) + reader, err := bs.Download(blobNode) Expect(err).ToNot(HaveOccurred()) readData, err := ioutil.ReadAll(reader) @@ -106,7 +111,7 @@ var _ = Describe("Blobstore", func() { _, err := os.Stat(blobPath) 
Expect(err).ToNot(HaveOccurred()) - err = bs.Delete(key) + err = bs.Delete(blobNode) Expect(err).ToNot(HaveOccurred()) _, err = os.Stat(blobPath) diff --git a/pkg/storage/fs/ocis/ocis.go b/pkg/storage/fs/ocis/ocis.go index 2d22309df2..d84d339334 100644 --- a/pkg/storage/fs/ocis/ocis.go +++ b/pkg/storage/fs/ocis/ocis.go @@ -40,7 +40,7 @@ func New(m map[string]interface{}) (storage.FS, error) { return nil, err } - bs, err := blobstore.New(path.Join(o.Root, "blobs")) + bs, err := blobstore.New(path.Join(o.Root)) if err != nil { return nil, err } diff --git a/pkg/storage/fs/s3ng/blobstore/blobstore.go b/pkg/storage/fs/s3ng/blobstore/blobstore.go index 0599d48f35..071fdc38cc 100644 --- a/pkg/storage/fs/s3ng/blobstore/blobstore.go +++ b/pkg/storage/fs/s3ng/blobstore/blobstore.go @@ -23,7 +23,10 @@ import ( "io" "net/url" "os" + "path/filepath" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" "github.com/pkg/errors" @@ -60,38 +63,50 @@ func New(endpoint, region, bucket, accessKey, secretKey string) (*Blobstore, err } // Upload stores some data in the blobstore under the given key -func (bs *Blobstore) Upload(key string, reader io.Reader) error { +func (bs *Blobstore) Upload(node *node.Node, reader io.Reader) error { size := int64(-1) if file, ok := reader.(*os.File); ok { info, err := file.Stat() if err != nil { - return errors.Wrapf(err, "could not determine file size for object '%s'", key) + return errors.Wrapf(err, "could not determine file size for object '%s'", bs.path(node)) } size = info.Size() } - _, err := bs.client.PutObject(context.Background(), bs.bucket, key, reader, size, minio.PutObjectOptions{ContentType: "application/octet-stream"}) + _, err := bs.client.PutObject(context.Background(), bs.bucket, bs.path(node), reader, size, minio.PutObjectOptions{ContentType: "application/octet-stream"}) if err 
!= nil { - return errors.Wrapf(err, "could not store object '%s' into bucket '%s'", key, bs.bucket) + return errors.Wrapf(err, "could not store object '%s' into bucket '%s'", bs.path(node), bs.bucket) } return nil } // Download retrieves a blob from the blobstore for reading -func (bs *Blobstore) Download(key string) (io.ReadCloser, error) { - reader, err := bs.client.GetObject(context.Background(), bs.bucket, key, minio.GetObjectOptions{}) +func (bs *Blobstore) Download(node *node.Node) (io.ReadCloser, error) { + reader, err := bs.client.GetObject(context.Background(), bs.bucket, bs.path(node), minio.GetObjectOptions{}) if err != nil { - return nil, errors.Wrapf(err, "could not download object '%s' from bucket '%s'", key, bs.bucket) + return nil, errors.Wrapf(err, "could not download object '%s' from bucket '%s'", bs.path(node), bs.bucket) } return reader, nil } // Delete deletes a blob from the blobstore -func (bs *Blobstore) Delete(key string) error { - err := bs.client.RemoveObject(context.Background(), bs.bucket, key, minio.RemoveObjectOptions{}) +func (bs *Blobstore) Delete(node *node.Node) error { + err := bs.client.RemoveObject(context.Background(), bs.bucket, bs.path(node), minio.RemoveObjectOptions{}) if err != nil { - return errors.Wrapf(err, "could not delete object '%s' from bucket '%s'", key, bs.bucket) + return errors.Wrapf(err, "could not delete object '%s' from bucket '%s'", bs.path(node), bs.bucket) } return nil } + +func (bs *Blobstore) path(node *node.Node) string { + // https://aws.amazon.com/de/premiumsupport/knowledge-center/s3-prefix-nested-folders-difference/ + // Prefixes are used to partition a bucket. A prefix is everything except the filename. + // For a file `BucketName/foo/bar/lorem.ipsum`, `BucketName/foo/bar/` is the prefix. + // There are request limits per prefix, therefore you should have many prefixes. + // There are no limits to prefixes per bucket, so in general it's better to have more than fewer. 
+ // + // Since the spaceID is always the same for a space, we don't need to pathify that, because it would + // not yield any performance gains + return filepath.Clean(filepath.Join(node.SpaceID, lookup.Pathify(node.BlobID, 4, 2))) +} diff --git a/pkg/storage/utils/decomposedfs/decomposedfs.go b/pkg/storage/utils/decomposedfs/decomposedfs.go index 7dbffac201..849aa27cb7 100644 --- a/pkg/storage/utils/decomposedfs/decomposedfs.go +++ b/pkg/storage/utils/decomposedfs/decomposedfs.go @@ -82,9 +82,9 @@ type Tree interface { RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPath string, target *node.Node) (*node.Node, *node.Node, func() error, error) PurgeRecycleItemFunc(ctx context.Context, spaceid, key, purgePath string) (*node.Node, func() error, error) - WriteBlob(key string, reader io.Reader) error - ReadBlob(key string) (io.ReadCloser, error) - DeleteBlob(key string) error + WriteBlob(node *node.Node, reader io.Reader) error + ReadBlob(node *node.Node) (io.ReadCloser, error) + DeleteBlob(node *node.Node) error Propagate(ctx context.Context, node *node.Node) (err error) } @@ -568,7 +568,7 @@ func (fs *Decomposedfs) Download(ctx context.Context, ref *provider.Reference) ( return nil, errtypes.PermissionDenied(filepath.Join(node.ParentID, node.Name)) } - reader, err := fs.tp.ReadBlob(node.BlobID) + reader, err := fs.tp.ReadBlob(node) if err != nil { return nil, errors.Wrap(err, "Decomposedfs: error download blob '"+node.ID+"'") } diff --git a/pkg/storage/utils/decomposedfs/mocks/Tree.go b/pkg/storage/utils/decomposedfs/mocks/Tree.go index 43891f1c19..d71ac7345a 100644 --- a/pkg/storage/utils/decomposedfs/mocks/Tree.go +++ b/pkg/storage/utils/decomposedfs/mocks/Tree.go @@ -67,13 +67,13 @@ func (_m *Tree) Delete(ctx context.Context, _a1 *node.Node) error { return r0 } -// DeleteBlob provides a mock function with given fields: key -func (_m *Tree) DeleteBlob(key string) error { - ret := _m.Called(key) +// DeleteBlob provides a mock function with given 
fields: _a0 +func (_m *Tree) DeleteBlob(_a0 *node.Node) error { + ret := _m.Called(_a0) var r0 error - if rf, ok := ret.Get(0).(func(string) error); ok { - r0 = rf(key) + if rf, ok := ret.Get(0).(func(*node.Node) error); ok { + r0 = rf(_a0) } else { r0 = ret.Error(0) } @@ -187,13 +187,13 @@ func (_m *Tree) PurgeRecycleItemFunc(ctx context.Context, spaceid string, key st return r0, r1, r2 } -// ReadBlob provides a mock function with given fields: key -func (_m *Tree) ReadBlob(key string) (io.ReadCloser, error) { - ret := _m.Called(key) +// ReadBlob provides a mock function with given fields: _a0 +func (_m *Tree) ReadBlob(_a0 *node.Node) (io.ReadCloser, error) { + ret := _m.Called(_a0) var r0 io.ReadCloser - if rf, ok := ret.Get(0).(func(string) io.ReadCloser); ok { - r0 = rf(key) + if rf, ok := ret.Get(0).(func(*node.Node) io.ReadCloser); ok { + r0 = rf(_a0) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(io.ReadCloser) @@ -201,8 +201,8 @@ func (_m *Tree) ReadBlob(key string) (io.ReadCloser, error) { } var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(key) + if rf, ok := ret.Get(1).(func(*node.Node) error); ok { + r1 = rf(_a0) } else { r1 = ret.Error(1) } @@ -265,13 +265,13 @@ func (_m *Tree) Setup() error { return r0 } -// WriteBlob provides a mock function with given fields: key, reader -func (_m *Tree) WriteBlob(key string, reader io.Reader) error { - ret := _m.Called(key, reader) +// WriteBlob provides a mock function with given fields: _a0, reader +func (_m *Tree) WriteBlob(_a0 *node.Node, reader io.Reader) error { + ret := _m.Called(_a0, reader) var r0 error - if rf, ok := ret.Get(0).(func(string, io.Reader) error); ok { - r0 = rf(key, reader) + if rf, ok := ret.Get(0).(func(*node.Node, io.Reader) error); ok { + r0 = rf(_a0, reader) } else { r0 = ret.Error(0) } diff --git a/pkg/storage/utils/decomposedfs/tree/mocks/Blobstore.go b/pkg/storage/utils/decomposedfs/tree/mocks/Blobstore.go index 4d56641091..80dfc827e0 100644 --- 
a/pkg/storage/utils/decomposedfs/tree/mocks/Blobstore.go +++ b/pkg/storage/utils/decomposedfs/tree/mocks/Blobstore.go @@ -24,6 +24,7 @@ import ( io "io" testing "testing" + node "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" mock "github.com/stretchr/testify/mock" ) @@ -32,13 +33,13 @@ type Blobstore struct { mock.Mock } -// Delete provides a mock function with given fields: key -func (_m *Blobstore) Delete(key string) error { - ret := _m.Called(key) +// Delete provides a mock function with given fields: _a0 +func (_m *Blobstore) Delete(_a0 *node.Node) error { + ret := _m.Called(_a0) var r0 error - if rf, ok := ret.Get(0).(func(string) error); ok { - r0 = rf(key) + if rf, ok := ret.Get(0).(func(*node.Node) error); ok { + r0 = rf(_a0) } else { r0 = ret.Error(0) } @@ -46,13 +47,13 @@ func (_m *Blobstore) Delete(key string) error { return r0 } -// Download provides a mock function with given fields: key -func (_m *Blobstore) Download(key string) (io.ReadCloser, error) { - ret := _m.Called(key) +// Download provides a mock function with given fields: _a0 +func (_m *Blobstore) Download(_a0 *node.Node) (io.ReadCloser, error) { + ret := _m.Called(_a0) var r0 io.ReadCloser - if rf, ok := ret.Get(0).(func(string) io.ReadCloser); ok { - r0 = rf(key) + if rf, ok := ret.Get(0).(func(*node.Node) io.ReadCloser); ok { + r0 = rf(_a0) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(io.ReadCloser) @@ -60,8 +61,8 @@ func (_m *Blobstore) Download(key string) (io.ReadCloser, error) { } var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(key) + if rf, ok := ret.Get(1).(func(*node.Node) error); ok { + r1 = rf(_a0) } else { r1 = ret.Error(1) } @@ -69,13 +70,13 @@ func (_m *Blobstore) Download(key string) (io.ReadCloser, error) { return r0, r1 } -// Upload provides a mock function with given fields: key, reader -func (_m *Blobstore) Upload(key string, reader io.Reader) error { - ret := _m.Called(key, reader) +// Upload provides a mock function with 
given fields: _a0, reader +func (_m *Blobstore) Upload(_a0 *node.Node, reader io.Reader) error { + ret := _m.Called(_a0, reader) var r0 error - if rf, ok := ret.Get(0).(func(string, io.Reader) error); ok { - r0 = rf(key, reader) + if rf, ok := ret.Get(0).(func(*node.Node, io.Reader) error); ok { + r0 = rf(_a0, reader) } else { r0 = ret.Error(0) } diff --git a/pkg/storage/utils/decomposedfs/tree/tree.go b/pkg/storage/utils/decomposedfs/tree/tree.go index 52246d4af9..6ca62d0927 100644 --- a/pkg/storage/utils/decomposedfs/tree/tree.go +++ b/pkg/storage/utils/decomposedfs/tree/tree.go @@ -46,9 +46,9 @@ import ( // Blobstore defines an interface for storing blobs in a blobstore type Blobstore interface { - Upload(key string, reader io.Reader) error - Download(key string) (io.ReadCloser, error) - Delete(key string) error + Upload(node *node.Node, reader io.Reader) error + Download(node *node.Node) (io.ReadCloser, error) + Delete(node *node.Node) error } // PathLookup defines the interface for the lookup component @@ -548,7 +548,7 @@ func (t *Tree) PurgeRecycleItemFunc(ctx context.Context, spaceid, key string, pa // delete blob from blobstore if rn.BlobID != "" { - if err = t.DeleteBlob(rn.BlobID); err != nil { + if err = t.DeleteBlob(rn); err != nil { log.Error().Err(err).Str("trashItem", trashItem).Msg("error deleting trash item blob") return err } @@ -735,22 +735,25 @@ func calculateTreeSize(ctx context.Context, nodePath string) (uint64, error) { } // WriteBlob writes a blob to the blobstore -func (t *Tree) WriteBlob(key string, reader io.Reader) error { - return t.blobstore.Upload(key, reader) +func (t *Tree) WriteBlob(node *node.Node, reader io.Reader) error { + return t.blobstore.Upload(node, reader) } // ReadBlob reads a blob from the blobstore -func (t *Tree) ReadBlob(key string) (io.ReadCloser, error) { - return t.blobstore.Download(key) +func (t *Tree) ReadBlob(node *node.Node) (io.ReadCloser, error) { + return t.blobstore.Download(node) } // DeleteBlob deletes 
a blob from the blobstore -func (t *Tree) DeleteBlob(key string) error { - if key == "" { - return fmt.Errorf("could not delete blob, empty key was given") +func (t *Tree) DeleteBlob(node *node.Node) error { + if node == nil { + return fmt.Errorf("could not delete blob, nil node was given") + } + if node.BlobID == "" { + return fmt.Errorf("could not delete blob, node with empty blob id was given") } - return t.blobstore.Delete(key) + return t.blobstore.Delete(node) } // TODO check if node exists? diff --git a/pkg/storage/utils/decomposedfs/tree/tree_test.go b/pkg/storage/utils/decomposedfs/tree/tree_test.go index 9486372637..d590025bd1 100644 --- a/pkg/storage/utils/decomposedfs/tree/tree_test.go +++ b/pkg/storage/utils/decomposedfs/tree/tree_test.go @@ -125,7 +125,7 @@ var _ = Describe("Tree", func() { }) It("does not delete the blob from the blobstore", func() { - env.Blobstore.AssertNotCalled(GinkgoT(), "Delete", mock.AnythingOfType("string")) + env.Blobstore.AssertNotCalled(GinkgoT(), "Delete", mock.AnythingOfType("*node.Node")) }) }) }) @@ -136,7 +136,7 @@ var _ = Describe("Tree", func() { ) JustBeforeEach(func() { - env.Blobstore.On("Delete", n.BlobID).Return(nil) + env.Blobstore.On("Delete", mock.AnythingOfType("*node.Node")).Return(nil) trashPath = path.Join(env.Root, "spaces", lookup.Pathify(n.SpaceRoot.ID, 1, 2), "trash", lookup.Pathify(n.ID, 4, 2)) Expect(t.Delete(env.Ctx, n)).To(Succeed()) }) @@ -157,7 +157,7 @@ var _ = Describe("Tree", func() { }) It("deletes the blob from the blobstore", func() { - env.Blobstore.AssertCalled(GinkgoT(), "Delete", mock.AnythingOfType("string")) + env.Blobstore.AssertCalled(GinkgoT(), "Delete", mock.AnythingOfType("*node.Node")) }) }) @@ -259,7 +259,7 @@ var _ = Describe("Tree", func() { }) It("does not try to delete a blob from the blobstore", func() { - env.Blobstore.AssertNotCalled(GinkgoT(), "Delete", mock.AnythingOfType("string")) + env.Blobstore.AssertNotCalled(GinkgoT(), "Delete", 
mock.AnythingOfType("*node.Node")) }) }) }) diff --git a/pkg/storage/utils/decomposedfs/upload.go b/pkg/storage/utils/decomposedfs/upload.go index 1addd50158..869f41d254 100644 --- a/pkg/storage/utils/decomposedfs/upload.go +++ b/pkg/storage/utils/decomposedfs/upload.go @@ -582,7 +582,7 @@ func (upload *fileUpload) FinishUpload(ctx context.Context) (err error) { return err } defer file.Close() - err = upload.fs.tp.WriteBlob(n.BlobID, file) + err = upload.fs.tp.WriteBlob(n, file) if err != nil { return errors.Wrap(err, "failed to upload file to blostore") } diff --git a/pkg/storage/utils/decomposedfs/upload_test.go b/pkg/storage/utils/decomposedfs/upload_test.go index 47aeff3edd..f1722f5124 100644 --- a/pkg/storage/utils/decomposedfs/upload_test.go +++ b/pkg/storage/utils/decomposedfs/upload_test.go @@ -230,7 +230,7 @@ var _ = Describe("File uploads", func() { uploadRef := &provider.Reference{Path: "/" + uploadIds["simple"]} - bs.On("Upload", mock.AnythingOfType("string"), mock.AnythingOfType("*os.File")). + bs.On("Upload", mock.AnythingOfType("*node.Node"), mock.AnythingOfType("*os.File")). Return(nil). Run(func(args mock.Arguments) { reader := args.Get(1).(io.Reader) @@ -268,7 +268,7 @@ var _ = Describe("File uploads", func() { uploadRef := &provider.Reference{Path: "/" + uploadIds["simple"]} - bs.On("Upload", mock.AnythingOfType("string"), mock.AnythingOfType("*os.File")). + bs.On("Upload", mock.AnythingOfType("*node.Node"), mock.AnythingOfType("*os.File")). Return(nil). Run(func(args mock.Arguments) { reader := args.Get(1).(io.Reader)