diff --git a/changelog/unreleased/materialized-xattrs.md b/changelog/unreleased/materialized-xattrs.md new file mode 100644 index 0000000000..1412b4e7f5 --- /dev/null +++ b/changelog/unreleased/materialized-xattrs.md @@ -0,0 +1,5 @@ +Enhancement: Introduce ini file based metadata backend + +We added a new metadata backend for the decomposed storage driver that uses an additional `.ini` file to store file metadata. This allows scaling beyond some filesystem specific xattr limitations. + +https://github.com/cs3org/reva/pull/3649 \ No newline at end of file diff --git a/go.mod b/go.mod index ab52a5ac94..c7c7a0430e 100644 --- a/go.mod +++ b/go.mod @@ -58,6 +58,7 @@ require ( github.com/pkg/xattr v0.4.9 github.com/prometheus/alertmanager v0.24.0 github.com/prometheus/client_golang v1.13.0 + github.com/rogpeppe/go-internal v1.3.0 github.com/rs/cors v1.8.2 github.com/rs/zerolog v1.28.0 github.com/sciencemesh/meshdirectory-web v1.0.4 @@ -84,6 +85,7 @@ require ( google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e google.golang.org/grpc v1.50.1 google.golang.org/protobuf v1.28.1 + gopkg.in/ini.v1 v1.67.0 gotest.tools v2.2.0+incompatible ) @@ -201,7 +203,6 @@ require ( golang.org/x/time v0.1.0 // indirect golang.org/x/tools v0.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect diff --git a/go.sum b/go.sum index 307fcb6791..d21db162e6 100644 --- a/go.sum +++ b/go.sum @@ -828,6 +828,7 @@ github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= diff --git a/pkg/storage/utils/decomposedfs/decomposedfs.go b/pkg/storage/utils/decomposedfs/decomposedfs.go index 48c393f74a..87ceaadbd4 100644 --- a/pkg/storage/utils/decomposedfs/decomposedfs.go +++ b/pkg/storage/utils/decomposedfs/decomposedfs.go @@ -24,6 +24,7 @@ package decomposedfs import ( "context" + "fmt" "io" "net/url" "os" @@ -51,6 +52,7 @@ import ( "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/tree" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/upload" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/prefixes" "github.com/cs3org/reva/v2/pkg/storage/utils/filelocks" "github.com/cs3org/reva/v2/pkg/storage/utils/templates" "github.com/cs3org/reva/v2/pkg/storagespace" @@ -101,6 +103,15 @@ func NewDefault(m map[string]interface{}, bs tree.Blobstore, es events.Stream) ( return nil, err } + switch o.MetadataBackend { + case "xattrs": + xattrs.UseXattrsBackend() + case "ini": + xattrs.UseIniBackend() + default: + return nil, fmt.Errorf("unknown metadata backend %s, only 'ini' or 'xattrs' (default) supported", o.MetadataBackend) + } + lu := &lookup.Lookup{} lu.Options = o @@ -566,7 +577,7 @@ func (fs *Decomposedfs) 
CreateDir(ctx context.Context, ref *provider.Reference) if fs.o.TreeTimeAccounting || fs.o.TreeSizeAccounting { // mark the home node as the end of propagation - if err = n.SetXattr(xattrs.PropagationAttr, "1"); err != nil { + if err = n.SetXattr(prefixes.PropagationAttr, "1"); err != nil { appctx.GetLogger(ctx).Error().Err(err).Interface("node", n).Msg("could not mark node to propagate") // FIXME: This does not return an error at all, but results in a severe situation that the diff --git a/pkg/storage/utils/decomposedfs/grants.go b/pkg/storage/utils/decomposedfs/grants.go index 9a47516b92..03b8d208a7 100644 --- a/pkg/storage/utils/decomposedfs/grants.go +++ b/pkg/storage/utils/decomposedfs/grants.go @@ -31,6 +31,7 @@ import ( "github.com/cs3org/reva/v2/pkg/storage/utils/ace" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/prefixes" "github.com/cs3org/reva/v2/pkg/storagespace" "github.com/cs3org/reva/v2/pkg/utils" ) @@ -202,9 +203,9 @@ func (fs *Decomposedfs) RemoveGrant(ctx context.Context, ref *provider.Reference var attr string if g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_GROUP { - attr = xattrs.GrantGroupAcePrefix + g.Grantee.GetGroupId().OpaqueId + attr = prefixes.GrantGroupAcePrefix + g.Grantee.GetGroupId().OpaqueId } else { - attr = xattrs.GrantUserAcePrefix + g.Grantee.GetUserId().OpaqueId + attr = prefixes.GrantUserAcePrefix + g.Grantee.GetUserId().OpaqueId } if err = xattrs.Remove(grantNode.InternalPath(), attr); err != nil { @@ -309,7 +310,7 @@ func (fs *Decomposedfs) storeGrant(ctx context.Context, n *node.Node, g *provide // set the grant e := ace.FromGrant(g) principal, value := e.Marshal() - if err := n.SetXattr(xattrs.GrantPrefix+principal, string(value)); err != nil { + if err := n.SetXattr(prefixes.GrantPrefix+principal, string(value)); err != nil { appctx.GetLogger(ctx).Error().Err(err). Str("principal", principal).Msg("Could not set grant for principal") return err @@ -329,7 +330,7 @@ func extractACEsFromAttrs(ctx context.Context, fsfn string, attrs []string) (ent log := appctx.GetLogger(ctx) entries = []*ace.ACE{} for i := range attrs { - if strings.HasPrefix(attrs[i], xattrs.GrantPrefix) { + if strings.HasPrefix(attrs[i], prefixes.GrantPrefix) { var value string var err error if value, err = xattrs.Get(fsfn, attrs[i]); err != nil { @@ -337,7 +338,7 @@ func extractACEsFromAttrs(ctx context.Context, fsfn string, attrs []string) (ent continue } var e *ace.ACE - principal := attrs[i][len(xattrs.GrantPrefix):] + principal := attrs[i][len(prefixes.GrantPrefix):] if e, err = ace.Unmarshal(principal, []byte(value)); err != nil { log.Error().Err(err).Str("principal", principal).Str("attr", attrs[i]).Msg("could not unmarshal ace") continue diff --git a/pkg/storage/utils/decomposedfs/grants_test.go b/pkg/storage/utils/decomposedfs/grants_test.go index a165961b31..d5613c83cb 100644 --- a/pkg/storage/utils/decomposedfs/grants_test.go +++ b/pkg/storage/utils/decomposedfs/grants_test.go @@ -28,9 +28,9 @@ import ( provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" helpers "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/testhelpers" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/prefixes" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "github.com/pkg/xattr" "github.com/stretchr/testify/mock" ) @@ -140,9 +140,9 @@ var _ = Describe("Grants", func() { o := env.Owner.GetId() localPath := n.InternalPath() - attr, err := xattr.Get(localPath, xattrs.GrantUserAcePrefix+grant.Grantee.GetUserId().OpaqueId) + attr, err := xattrs.Get(localPath, prefixes.GrantUserAcePrefix+grant.Grantee.GetUserId().OpaqueId) Expect(err).ToNot(HaveOccurred()) - Expect(string(attr)).To(Equal(fmt.Sprintf("\x00t=A:f=:p=rw:c=%s:e=0\n", o.GetOpaqueId()))) // NOTE: this tests ace package + Expect(attr).To(Equal(fmt.Sprintf("\x00t=A:f=:p=rw:c=%s:e=0\n", o.GetOpaqueId()))) // NOTE: this tests ace package }) It("creates a storage space per created grant", func() { diff --git a/pkg/storage/utils/decomposedfs/lookup/lookup.go b/pkg/storage/utils/decomposedfs/lookup/lookup.go index f9477ace08..a4464a4497 100644 --- a/pkg/storage/utils/decomposedfs/lookup/lookup.go +++ b/pkg/storage/utils/decomposedfs/lookup/lookup.go @@ -30,6 +30,7 @@ import ( "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/prefixes" ) // Lookup implements transformations from filepath to node and back @@ -138,7 +139,7 @@ func (lu *Lookup) WalkPath(ctx context.Context, r *node.Node, p string, followRe } if followReferences { - if attrBytes, err := r.Xattr(xattrs.ReferenceAttr); err == nil { + if attrBytes, err := r.Xattr(prefixes.ReferenceAttr); err == nil { realNodeID := attrBytes ref, err := xattrs.ReferenceFromAttr([]byte(realNodeID)) if err != nil { diff --git a/pkg/storage/utils/decomposedfs/metadata.go b/pkg/storage/utils/decomposedfs/metadata.go index 2ed0cb4ee9..b40b21b4dc 100644 --- a/pkg/storage/utils/decomposedfs/metadata.go +++ b/pkg/storage/utils/decomposedfs/metadata.go @@ -30,6 +30,7 @@ import ( "github.com/cs3org/reva/v2/pkg/errtypes" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/prefixes" "github.com/cs3org/reva/v2/pkg/storagespace" "github.com/cs3org/reva/v2/pkg/utils" "github.com/pkg/errors" @@ -109,7 +110,7 @@ func (fs *Decomposedfs) SetArbitraryMetadata(ctx context.Context, ref *provider. 
} } for k, v := range md.Metadata { - attrName := xattrs.MetadataPrefix + k + attrName := prefixes.MetadataPrefix + k if err = n.SetXattr(attrName, v); err != nil { errs = append(errs, errors.Wrap(err, "Decomposedfs: could not set metadata attribute "+attrName+" to "+k)) } @@ -182,7 +183,7 @@ func (fs *Decomposedfs) UnsetArbitraryMetadata(ctx context.Context, ref *provide errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "user has no id")) continue } - fa := fmt.Sprintf("%s:%s:%s@%s", xattrs.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp()) + fa := fmt.Sprintf("%s:%s:%s@%s", prefixes.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp()) if err := n.RemoveXattr(fa); err != nil { if xattrs.IsAttrUnset(err) { continue // already gone, ignore @@ -194,7 +195,7 @@ func (fs *Decomposedfs) UnsetArbitraryMetadata(ctx context.Context, ref *provide errs = append(errs, errors.Wrap(err, "could not unset favorite flag")) } default: - if err = n.RemoveXattr(xattrs.MetadataPrefix + k); err != nil { + if err = n.RemoveXattr(prefixes.MetadataPrefix + k); err != nil { if xattrs.IsAttrUnset(err) { continue // already gone, ignore } diff --git a/pkg/storage/utils/decomposedfs/node/node.go b/pkg/storage/utils/decomposedfs/node/node.go index e391ce85f4..f250ddd865 100644 --- a/pkg/storage/utils/decomposedfs/node/node.go +++ b/pkg/storage/utils/decomposedfs/node/node.go @@ -43,6 +43,7 @@ import ( "github.com/cs3org/reva/v2/pkg/mime" "github.com/cs3org/reva/v2/pkg/storage/utils/ace" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/prefixes" "github.com/cs3org/reva/v2/pkg/storage/utils/grants" "github.com/cs3org/reva/v2/pkg/utils" "github.com/google/uuid" @@ -87,6 +88,7 @@ type Node struct { lu PathLookup xattrsCache map[string]string + nodeType *provider.ResourceType } // PathLookup defines the interface for the lookup component @@ -97,7 +99,7 @@ type PathLookup interface { } // New returns a new instance of Node -func New(spaceID, id, parentID, name string, blobsize int64, blobID string, owner *userpb.UserId, lu PathLookup) *Node { +func New(spaceID, id, parentID, name string, blobsize int64, blobID string, t provider.ResourceType, owner *userpb.UserId, lu PathLookup) *Node { if blobID == "" { blobID = uuid.New().String() } @@ -110,16 +112,66 @@ func New(spaceID, id, parentID, name string, blobsize int64, blobID string, owne owner: owner, lu: lu, BlobID: blobID, + nodeType: &t, } } +// Type returns the node's resource type +func (n *Node) Type() provider.ResourceType { + if n.nodeType != nil { + return *n.nodeType + } + + t := provider.ResourceType_RESOURCE_TYPE_INVALID + + // Try to read from xattrs + typeAttr, err := n.Xattr(prefixes.TypeAttr) + if err == nil { + typeInt, err := strconv.ParseInt(typeAttr, 10, 32) + if err != nil { + return t + } + t = provider.ResourceType(typeInt) + n.nodeType = &t + return t + } + + // Fall back to checking on disk + fi, err := os.Lstat(n.InternalPath()) + if err != nil { + return t + } + + switch { + case fi.IsDir(): + if _, err = n.Xattr(prefixes.ReferenceAttr); err == nil { + t = provider.ResourceType_RESOURCE_TYPE_REFERENCE + } else { + t = provider.ResourceType_RESOURCE_TYPE_CONTAINER + } + case fi.Mode().IsRegular(): + t = provider.ResourceType_RESOURCE_TYPE_FILE + case fi.Mode()&os.ModeSymlink != 0: + t = provider.ResourceType_RESOURCE_TYPE_SYMLINK + // TODO reference using ext attr on a symlink + // 
nodeType = provider.ResourceType_RESOURCE_TYPE_REFERENCE + } + n.nodeType = &t + return t +} + +// SetType sets the type of the node. +func (n *Node) SetType(t provider.ResourceType) { + n.nodeType = &t +} + // ChangeOwner sets the owner of n to newOwner func (n *Node) ChangeOwner(new *userpb.UserId) (err error) { n.SpaceRoot.owner = new - var attribs = map[string]string{xattrs.OwnerIDAttr: new.OpaqueId, - xattrs.OwnerIDPAttr: new.Idp, - xattrs.OwnerTypeAttr: utils.UserTypeToString(new.Type)} + var attribs = map[string]string{prefixes.OwnerIDAttr: new.OpaqueId, + prefixes.OwnerIDPAttr: new.Idp, + prefixes.OwnerTypeAttr: utils.UserTypeToString(new.Type)} if err := n.SpaceRoot.SetXattrs(attribs); err != nil { return err @@ -132,10 +184,11 @@ func (n *Node) ChangeOwner(new *userpb.UserId) (err error) { func (n *Node) WriteAllNodeMetadata() (err error) { attribs := make(map[string]string) - attribs[xattrs.ParentidAttr] = n.ParentID - attribs[xattrs.NameAttr] = n.Name - attribs[xattrs.BlobIDAttr] = n.BlobID - attribs[xattrs.BlobsizeAttr] = strconv.FormatInt(n.Blobsize, 10) + attribs[prefixes.TypeAttr] = strconv.FormatInt(int64(n.Type()), 10) + attribs[prefixes.ParentidAttr] = n.ParentID + attribs[prefixes.NameAttr] = n.Name + attribs[prefixes.BlobIDAttr] = n.BlobID + attribs[prefixes.BlobsizeAttr] = strconv.FormatInt(n.Blobsize, 10) return n.SetXattrs(attribs) } @@ -144,9 +197,9 @@ func (n *Node) WriteAllNodeMetadata() (err error) { func (n *Node) WriteOwner(owner *userpb.UserId) error { n.SpaceRoot.owner = owner attribs := map[string]string{ - xattrs.OwnerIDAttr: owner.OpaqueId, - xattrs.OwnerIDPAttr: owner.Idp, - xattrs.OwnerTypeAttr: utils.UserTypeToString(owner.Type), + prefixes.OwnerIDAttr: owner.OpaqueId, + prefixes.OwnerIDPAttr: owner.Idp, + prefixes.OwnerTypeAttr: utils.UserTypeToString(owner.Type), } if err := n.SpaceRoot.SetXattrs(attribs); err != nil { return err @@ -178,7 +231,8 @@ func (n *Node) SpaceOwnerOrManager(ctx context.Context) *userpb.UserId { } // ReadNode creates a new instance from an id and checks if it exists -func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canListDisabledSpace bool) (n *Node, err error) { +func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canListDisabledSpace bool) (*Node, error) { + var err error // read space root r := &Node{ @@ -194,15 +248,13 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis case err != nil: return nil, err } + r.Exists = true + // lookup name in extended attributes - r.Name, err = r.Xattr(xattrs.NameAttr) - switch { - case xattrs.IsNotExist(err): - return r, nil // swallow not found, the node defaults to exists = false - case err != nil: + r.Name, err = r.Xattr(prefixes.NameAttr) + if err != nil { return nil, err } - r.Exists = true // TODO ReadNode should not check permissions if !canListDisabledSpace && r.IsDisabled() { @@ -232,12 +284,13 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis } // read node - n = &Node{ + n := &Node{ SpaceID: spaceID, lu: lu, ID: nodeID, SpaceRoot: r, } + nodePath := n.InternalPath() // append back revision to nodeid, even when returning a not existing node defer func() { @@ -247,48 +300,20 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis } }() - nodePath := n.InternalPath() - - // lookup name in extended attributes - n.Name, err = n.Xattr(xattrs.NameAttr) + attrs, err := n.Xattrs() switch { case xattrs.IsNotExist(err): return n, nil // swallow not 
found, the node defaults to exists = false case err != nil: return nil, err } - n.Exists = true - // lookup blobID in extended attributes - n.BlobID, err = ReadBlobIDAttr(nodePath + revisionSuffix) - switch { - case xattrs.IsNotExist(err): - return n, nil // swallow not found, the node defaults to exists = false - case err != nil: - return nil, err - } - - // Lookup blobsize - n.Blobsize, err = ReadBlobSizeAttr(nodePath + revisionSuffix) - switch { - case xattrs.IsNotExist(err): - return n, nil // swallow not found, the node defaults to exists = false - case err != nil: - return nil, errtypes.InternalError(err.Error()) - } - - // lookup parent id in extended attributes - n.ParentID, err = n.Xattr(xattrs.ParentidAttr) - switch { - case xattrs.IsAttrUnset(err): - return nil, errtypes.InternalError(err.Error()) - case xattrs.IsNotExist(err): - return n, nil // swallow not found, the node defaults to exists = false - case err != nil: - return nil, errtypes.InternalError(err.Error()) + n.Name = attrs[prefixes.NameAttr] + n.ParentID = attrs[prefixes.ParentidAttr] + if n.ParentID == "" { + return nil, errtypes.InternalError("Missing parent ID on node") } - // TODO why do we stat the parent? to determine if the current node is in the trash we would need to traverse all parents... // we need to traverse all parents for permissions anyway ... // - we can compare to space root owner with the current user @@ -311,7 +336,7 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis // - can be made more robust with a journal // - same recursion mechanism can be used to purge items? sth we still need to do // - flag the two above options with dtime - _, err = os.Stat(n.ParentInternalPath()) + _, err = os.Stat(n.ParentPath()) if err != nil { if errors.Is(err, fs.ErrNotExist) { return nil, errtypes.NotFound(err.Error()) @@ -319,7 +344,18 @@ func ReadNode(ctx context.Context, lu PathLookup, spaceID, nodeID string, canLis return nil, err } - return + n.BlobID, err = ReadBlobIDAttr(nodePath + revisionSuffix) + if err != nil { + return nil, err + } + + // Lookup blobsize + n.Blobsize, err = ReadBlobSizeAttr(nodePath + revisionSuffix) + if err != nil { + return nil, err + } + + return n, nil } // The os error is buried inside the fs.PathError error @@ -390,12 +426,12 @@ func (n *Node) Parent() (p *Node, err error) { } // lookup parent id in extended attributes - if p.ParentID, err = p.Xattr(xattrs.ParentidAttr); err != nil { + if p.ParentID, err = p.Xattr(prefixes.ParentidAttr); err != nil { p.ParentID = "" return } // lookup name in extended attributes - if p.Name, err = p.Xattr(xattrs.NameAttr); err != nil { + if p.Name, err = p.Xattr(prefixes.NameAttr); err != nil { p.Name = "" p.ParentID = "" return @@ -422,7 +458,7 @@ func (n *Node) readOwner() (*userpb.UserId, error) { var attr string var err error // lookup ID in extended attributes - attr, err = n.SpaceRoot.Xattr(xattrs.OwnerIDAttr) + attr, err = n.SpaceRoot.Xattr(prefixes.OwnerIDAttr) switch { case err == nil: owner.OpaqueId = attr @@ -433,7 +469,7 @@ func (n *Node) readOwner() (*userpb.UserId, error) { } // lookup IDP in extended attributes - attr, err = n.SpaceRoot.Xattr(xattrs.OwnerIDPAttr) + attr, err = n.SpaceRoot.Xattr(prefixes.OwnerIDPAttr) switch { case err == nil: owner.Idp = attr @@ -444,7 +480,7 @@ func (n *Node) readOwner() (*userpb.UserId, error) { } // lookup type in extended attributes - attr, err = n.SpaceRoot.Xattr(xattrs.OwnerTypeAttr) + attr, err = n.SpaceRoot.Xattr(prefixes.OwnerTypeAttr) switch { case err 
== nil: owner.Type = utils.UserTypeMap(attr) @@ -488,8 +524,8 @@ func (n *Node) InternalPath() string { return n.lu.InternalPath(n.SpaceID, n.ID) } -// ParentInternalPath returns the internal path of the parent of the current node -func (n *Node) ParentInternalPath() string { +// ParentPath returns the internal path of the parent of the current node +func (n *Node) ParentPath() string { return n.lu.InternalPath(n.SpaceID, n.ParentID) } @@ -540,12 +576,7 @@ func (n *Node) SetEtag(ctx context.Context, val string) (err error) { sublog := appctx.GetLogger(ctx).With().Interface("node", n).Logger() var tmTime time.Time if tmTime, err = n.GetTMTime(); err != nil { - // no tmtime, use mtime - var fi os.FileInfo - if fi, err = os.Lstat(n.InternalPath()); err != nil { - return - } - tmTime = fi.ModTime() + return } var etag string if etag, err = calculateEtag(n.ID, tmTime); err != nil { @@ -561,7 +592,7 @@ func (n *Node) SetEtag(ctx context.Context, val string) (err error) { return nil } // etag is only valid until the calculated etag changes, is part of propagation - return n.SetXattr(xattrs.TmpEtagAttr, val) + return n.SetXattr(prefixes.TmpEtagAttr, val) } // SetFavorite sets the favorite for the current user @@ -583,21 +614,14 @@ func (n *Node) SetEtag(ctx context.Context, val string) (err error) { // public tags can be mapped to extended attributes func (n *Node) SetFavorite(uid *userpb.UserId, val string) error { // the favorite flag is specific to the user, so we need to incorporate the userid - fa := fmt.Sprintf("%s:%s:%s@%s", xattrs.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp()) + fa := fmt.Sprintf("%s:%s:%s@%s", prefixes.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp()) return n.SetXattr(fa, val) } // IsDir returns true if the node is a directory func (n *Node) IsDir() bool { - nodePath := n.InternalPath() - if fi, err := os.Lstat(nodePath); err == nil { - if fi.IsDir() { - if _, err := n.Xattr(xattrs.ReferenceAttr); err != nil { - return true - } - } - } - return false + attr, _ := n.Xattr(prefixes.TypeAttr) + return attr == strconv.FormatInt(int64(provider.ResourceType_RESOURCE_TYPE_CONTAINER), 10) } // AsResourceInfo return the node as CS3 ResourceInfo @@ -605,29 +629,11 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi sublog := appctx.GetLogger(ctx).With().Interface("node", n.ID).Logger() var fn string - nodePath := n.InternalPath() - - var fi os.FileInfo - - nodeType := provider.ResourceType_RESOURCE_TYPE_INVALID - if fi, err = os.Lstat(nodePath); err != nil { - return - } + nodeType := n.Type() var target string - switch { - case fi.IsDir(): - if target, err = n.Xattr(xattrs.ReferenceAttr); err == nil { - nodeType = provider.ResourceType_RESOURCE_TYPE_REFERENCE - } else { - nodeType = provider.ResourceType_RESOURCE_TYPE_CONTAINER - } - case fi.Mode().IsRegular(): - nodeType = provider.ResourceType_RESOURCE_TYPE_FILE - case fi.Mode()&os.ModeSymlink != 0: - nodeType = provider.ResourceType_RESOURCE_TYPE_SYMLINK - // TODO reference using ext attr on a symlink - // nodeType = provider.ResourceType_RESOURCE_TYPE_REFERENCE + if nodeType == provider.ResourceType_RESOURCE_TYPE_REFERENCE { + target, _ = n.Xattr(prefixes.ReferenceAttr) } id := &provider.ResourceId{SpaceId: n.SpaceID, OpaqueId: n.ID} @@ -683,12 +689,11 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi var tmTime time.Time if tmTime, err = n.GetTMTime(); err != nil { - // no tmtime, use mtime - 
tmTime = fi.ModTime() + sublog.Debug().Err(err).Msg("could not get tmtime") } // use temporary etag if it is set - if b, err := n.Xattr(xattrs.TmpEtagAttr); err == nil { + if b, err := n.Xattr(prefixes.TmpEtagAttr); err == nil { ri.Etag = fmt.Sprintf(`"%x"`, b) // TODO why do we convert string(b)? is the temporary etag stored as string? -> should we use bytes? use hex.EncodeToString? } else if ri.Etag, err = calculateEtag(n.ID, tmTime); err != nil { sublog.Debug().Err(err).Msg("could not calculate etag") @@ -730,7 +735,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi if u, ok := ctxpkg.ContextGetUser(ctx); ok { // the favorite flag is specific to the user, so we need to incorporate the userid if uid := u.GetId(); uid != nil { - fa := fmt.Sprintf("%s:%s:%s@%s", xattrs.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp()) + fa := fmt.Sprintf("%s:%s:%s@%s", prefixes.FavPrefix, utils.UserTypeToString(uid.GetType()), uid.GetOpaqueId(), uid.GetIdp()) if val, err := n.Xattr(fa); err == nil { sublog.Debug(). Str("favorite", fa). @@ -799,11 +804,11 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi } else { for key, value := range attrs { // filter out non-custom properties - if !strings.HasPrefix(key, xattrs.MetadataPrefix) { + if !strings.HasPrefix(key, prefixes.MetadataPrefix) { continue } // only read when key was requested - k := key[len(xattrs.MetadataPrefix):] + k := key[len(prefixes.MetadataPrefix):] if _, ok := mdKeysMap[k]; returnAllMetadata || ok { metadata[k] = value } @@ -827,7 +832,7 @@ func (n *Node) AsResourceInfo(ctx context.Context, rp *provider.ResourcePermissi } func (n *Node) readChecksumIntoResourceChecksum(ctx context.Context, algo string, ri *provider.ResourceInfo) { - v, err := n.Xattr(xattrs.ChecksumPrefix + algo) + v, err := n.Xattr(prefixes.ChecksumPrefix + algo) switch { case err == nil: ri.Checksum = &provider.ResourceChecksum{ @@ -842,7 +847,7 @@ func (n *Node) readChecksumIntoResourceChecksum(ctx context.Context, algo string } func (n *Node) readChecksumIntoOpaque(ctx context.Context, algo string, ri *provider.ResourceInfo) { - v, err := n.Xattr(xattrs.ChecksumPrefix + algo) + v, err := n.Xattr(prefixes.ChecksumPrefix + algo) switch { case err == nil: if ri.Opaque == nil { @@ -863,7 +868,7 @@ func (n *Node) readChecksumIntoOpaque(ctx context.Context, algo string, ri *prov // quota is always stored on the root node func (n *Node) readQuotaIntoOpaque(ctx context.Context, ri *provider.ResourceInfo) { - v, err := n.Xattr(xattrs.QuotaAttr) + v, err := n.Xattr(prefixes.QuotaAttr) switch { case err == nil: // make sure we have a proper signed int @@ -893,32 +898,38 @@ func (n *Node) readQuotaIntoOpaque(ctx context.Context, ri *provider.ResourceInf // HasPropagation checks if the propagation attribute exists and is set to "1" func (n *Node) HasPropagation() (propagation bool) { - if b, err := n.Xattr(xattrs.PropagationAttr); err == nil { + if b, err := n.Xattr(prefixes.PropagationAttr); err == nil { return b == "1" } return false } // GetTMTime reads the tmtime from the extended attributes -func (n *Node) GetTMTime() (tmTime time.Time, err error) { - var b string - if b, err = n.Xattr(xattrs.TreeMTimeAttr); err != nil { - return +func (n *Node) GetTMTime() (time.Time, error) { + b, err := n.Xattr(prefixes.TreeMTimeAttr) + if err == nil { + return time.Parse(time.RFC3339Nano, b) } - return time.Parse(time.RFC3339Nano, b) + + // no tmtime, use mtime + fi, err := 
os.Lstat(n.InternalPath()) + if err != nil { + return time.Time{}, err + } + return fi.ModTime(), nil } // SetTMTime writes the UTC tmtime to the extended attributes or removes the attribute if nil is passed func (n *Node) SetTMTime(t *time.Time) (err error) { if t == nil { - return n.RemoveXattr(xattrs.TreeMTimeAttr) + return n.RemoveXattr(prefixes.TreeMTimeAttr) } - return n.SetXattr(xattrs.TreeMTimeAttr, t.UTC().Format(time.RFC3339Nano)) + return n.SetXattr(prefixes.TreeMTimeAttr, t.UTC().Format(time.RFC3339Nano)) } // GetDTime reads the dtime from the extended attributes func (n *Node) GetDTime() (tmTime time.Time, err error) { - b, err := n.Xattr(xattrs.DTimeAttr) + b, err := n.Xattr(prefixes.DTimeAttr) if err != nil { return time.Time{}, err } @@ -928,9 +939,9 @@ func (n *Node) GetDTime() (tmTime time.Time, err error) { // SetDTime writes the UTC dtime to the extended attributes or removes the attribute if nil is passed func (n *Node) SetDTime(t *time.Time) (err error) { if t == nil { - return n.RemoveXattr(xattrs.DTimeAttr) + return n.RemoveXattr(prefixes.DTimeAttr) } - return n.SetXattr(xattrs.DTimeAttr, t.UTC().Format(time.RFC3339Nano)) + return n.SetXattr(prefixes.DTimeAttr, t.UTC().Format(time.RFC3339Nano)) } // IsDisabled returns true when the node has a dmtime attribute set @@ -946,7 +957,7 @@ func (n *Node) IsDisabled() bool { // GetTreeSize reads the treesize from the extended attributes func (n *Node) GetTreeSize() (treesize uint64, err error) { var b string - if b, err = n.Xattr(xattrs.TreesizeAttr); err != nil { + if b, err = n.Xattr(prefixes.TreesizeAttr); err != nil { return } return strconv.ParseUint(b, 10, 64) @@ -954,13 +965,13 @@ func (n *Node) GetTreeSize() (treesize uint64, err error) { // SetTreeSize writes the treesize to the extended attributes func (n *Node) SetTreeSize(ts uint64) (err error) { - return n.SetXattr(xattrs.TreesizeAttr, strconv.FormatUint(ts, 10)) + return n.SetXattr(prefixes.TreesizeAttr, strconv.FormatUint(ts, 10)) } // GetBlobSize reads the blobsize from the extended attributes func (n *Node) GetBlobSize() (treesize uint64, err error) { var b string - if b, err = n.Xattr(xattrs.BlobsizeAttr); err != nil { + if b, err = n.Xattr(prefixes.BlobsizeAttr); err != nil { return } return strconv.ParseUint(b, 10, 64) @@ -968,12 +979,12 @@ func (n *Node) GetBlobSize() (treesize uint64, err error) { // SetChecksum writes the checksum with the given checksum type to the extended attributes func (n *Node) SetChecksum(csType string, h hash.Hash) (err error) { - return n.SetXattr(xattrs.ChecksumPrefix+csType, string(h.Sum(nil))) + return n.SetXattr(prefixes.ChecksumPrefix+csType, string(h.Sum(nil))) } // UnsetTempEtag removes the temporary etag attribute func (n *Node) UnsetTempEtag() (err error) { - return n.RemoveXattr(xattrs.TmpEtagAttr) + return n.RemoveXattr(prefixes.TmpEtagAttr) } // ReadUserPermissions will assemble the permissions for the current user on the given node without parent nodes @@ -1008,15 +1019,15 @@ func (n *Node) ReadUserPermissions(ctx context.Context, u *userpb.User) (ap prov // 1. we can start iterating over the acls / grants on the node or // 2. we can iterate over the number of groups // The current implementation tries to be defensive for cases where users have hundreds or thousands of groups, so we iterate over the existing acls. 
- userace := xattrs.GrantUserAcePrefix + u.Id.OpaqueId + userace := prefixes.GrantUserAcePrefix + u.Id.OpaqueId userFound := false for i := range grantees { switch { // we only need to find the user once case !userFound && grantees[i] == userace: g, err = n.ReadGrant(ctx, grantees[i]) - case strings.HasPrefix(grantees[i], xattrs.GrantGroupAcePrefix): // only check group grantees - gr := strings.TrimPrefix(grantees[i], xattrs.GrantGroupAcePrefix) + case strings.HasPrefix(grantees[i], prefixes.GrantGroupAcePrefix): // only check group grantees + gr := strings.TrimPrefix(grantees[i], prefixes.GrantGroupAcePrefix) if groupsMap[gr] { g, err = n.ReadGrant(ctx, grantees[i]) } else { @@ -1052,7 +1063,7 @@ func (n *Node) ReadUserPermissions(ctx context.Context, u *userpb.User) (ap prov // IsDenied checks if the node was denied to that user func (n *Node) IsDenied(ctx context.Context) bool { u := ctxpkg.ContextMustGetUser(ctx) - userace := xattrs.GrantUserAcePrefix + u.Id.OpaqueId + userace := prefixes.GrantUserAcePrefix + u.Id.OpaqueId g, err := n.ReadGrant(ctx, userace) switch { case err == nil: @@ -1076,7 +1087,7 @@ func (n *Node) ListGrantees(ctx context.Context) (grantees []string, err error) return nil, err } for name := range attrs { - if strings.HasPrefix(name, xattrs.GrantPrefix) { + if strings.HasPrefix(name, prefixes.GrantPrefix) { grantees = append(grantees, name) } } @@ -1090,7 +1101,7 @@ func (n *Node) ReadGrant(ctx context.Context, grantee string) (g *provider.Grant return nil, err } var e *ace.ACE - if e, err = ace.Unmarshal(strings.TrimPrefix(grantee, xattrs.GrantPrefix), []byte(xattr)); err != nil { + if e, err = ace.Unmarshal(strings.TrimPrefix(grantee, prefixes.GrantPrefix), []byte(xattr)); err != nil { return nil, err } return e.Grant(), nil @@ -1122,7 +1133,7 @@ func (n *Node) ListGrants(ctx context.Context) ([]*provider.Grant, error) { // ReadBlobSizeAttr reads the blobsize from the xattrs func ReadBlobSizeAttr(path string) (int64, error) { - attr, err := xattrs.Get(path, xattrs.BlobsizeAttr) + attr, err := xattrs.Get(path, prefixes.BlobsizeAttr) if err != nil { return 0, errors.Wrapf(err, "error reading blobsize xattr") } @@ -1135,7 +1146,7 @@ func ReadBlobSizeAttr(path string) (int64, error) { // ReadBlobIDAttr reads the blobsize from the xattrs func ReadBlobIDAttr(path string) (string, error) { - attr, err := xattrs.Get(path, xattrs.BlobIDAttr) + attr, err := xattrs.Get(path, prefixes.BlobIDAttr) if err != nil { return "", errors.Wrapf(err, "error reading blobid xattr") } @@ -1148,9 +1159,9 @@ func (n *Node) getGranteeTypes(ctx context.Context) []provider.GranteeType { hasUserShares, hasGroupShares := false, false for i := range g { switch { - case !hasUserShares && strings.HasPrefix(g[i], xattrs.GrantUserAcePrefix): + case !hasUserShares && strings.HasPrefix(g[i], prefixes.GrantUserAcePrefix): hasUserShares = true - case !hasGroupShares && strings.HasPrefix(g[i], xattrs.GrantGroupAcePrefix): + case !hasGroupShares && strings.HasPrefix(g[i], prefixes.GrantGroupAcePrefix): hasGroupShares = true case hasUserShares && hasGroupShares: break @@ -1200,37 +1211,37 @@ func (n *Node) FindStorageSpaceRoot() error { // UnmarkProcessing removes the processing flag from the node func (n *Node) UnmarkProcessing(uploadID string) error { - v, _ := n.Xattr(xattrs.StatusPrefix) + v, _ := n.Xattr(prefixes.StatusPrefix) if v != ProcessingStatus+uploadID { // file started another postprocessing later - do not remove return nil } - return n.RemoveXattr(xattrs.StatusPrefix) + return 
n.RemoveXattr(prefixes.StatusPrefix) } // IsProcessing returns true if the node is currently being processed func (n *Node) IsProcessing() bool { - v, err := n.Xattr(xattrs.StatusPrefix) + v, err := n.Xattr(prefixes.StatusPrefix) return err == nil && strings.HasPrefix(v, ProcessingStatus) } // IsSpaceRoot checks if the node is a space root func (n *Node) IsSpaceRoot() bool { - _, err := n.Xattr(xattrs.SpaceNameAttr) + _, err := n.Xattr(prefixes.SpaceNameAttr) return err == nil } // SetScanData sets the virus scan info to the node func (n *Node) SetScanData(info string, date time.Time) error { return xattrs.SetMultiple(n.InternalPath(), map[string]string{ - xattrs.ScanStatusPrefix: info, - xattrs.ScanDatePrefix: date.Format(time.RFC3339Nano), + prefixes.ScanStatusPrefix: info, + prefixes.ScanDatePrefix: date.Format(time.RFC3339Nano), }) } // ScanData returns scanning information of the node func (n *Node) ScanData() (scanned bool, virus string, scantime time.Time) { - ti, _ := n.Xattr(xattrs.ScanDatePrefix) + ti, _ := n.Xattr(prefixes.ScanDatePrefix) if ti == "" { return // not scanned yet } @@ -1240,7 +1251,7 @@ func (n *Node) ScanData() (scanned bool, virus string, scantime time.Time) { return } - i, err := n.Xattr(xattrs.ScanStatusPrefix) + i, err := n.Xattr(prefixes.ScanStatusPrefix) if err != nil { return } @@ -1258,7 +1269,7 @@ var CheckQuota = func(spaceRoot *Node, overwrite bool, oldSize, newSize uint64) if !enoughDiskSpace(spaceRoot.InternalPath(), newSize) { return false, errtypes.InsufficientStorage("disk full") } - quotaByteStr, _ := spaceRoot.Xattr(xattrs.QuotaAttr) + quotaByteStr, _ := spaceRoot.Xattr(prefixes.QuotaAttr) if quotaByteStr == "" || quotaByteStr == QuotaUnlimited { // if quota is not set, it means unlimited return true, nil @@ -1282,3 +1293,39 @@ func enoughDiskSpace(path string, fileSize uint64) bool { } return avalB > fileSize } + +// TypeFromPath returns the type of the node at the given path +func TypeFromPath(path string) provider.ResourceType { + // Try to read from xattrs + typeAttr, err := xattrs.Get(path, prefixes.TypeAttr) + t := provider.ResourceType_RESOURCE_TYPE_INVALID + if err == nil { + typeInt, err := strconv.ParseInt(typeAttr, 10, 32) + if err != nil { + return t + } + return provider.ResourceType(typeInt) + } + + // Fall back to checking on disk + fi, err := os.Lstat(path) + if err != nil { + return t + } + + switch { + case fi.IsDir(): + if _, err = xattrs.Get(path, prefixes.ReferenceAttr); err == nil { + t = provider.ResourceType_RESOURCE_TYPE_REFERENCE + } else { + t = provider.ResourceType_RESOURCE_TYPE_CONTAINER + } + case fi.Mode().IsRegular(): + t = provider.ResourceType_RESOURCE_TYPE_FILE + case fi.Mode()&os.ModeSymlink != 0: + t = provider.ResourceType_RESOURCE_TYPE_SYMLINK + // TODO reference using ext attr on a symlink + // nodeType = provider.ResourceType_RESOURCE_TYPE_REFERENCE + } + return t +} diff --git a/pkg/storage/utils/decomposedfs/node/node_test.go b/pkg/storage/utils/decomposedfs/node/node_test.go index c92ce5b785..c4faee2d04 100644 --- a/pkg/storage/utils/decomposedfs/node/node_test.go +++ b/pkg/storage/utils/decomposedfs/node/node_test.go @@ -58,8 +58,8 @@ var _ = Describe("Node", func() { Describe("New", func() { It("generates unique blob ids if none are given", func() { - n1 := node.New(env.SpaceRootRes.SpaceId, id, "", name, 10, "", env.Owner.Id, env.Lookup) - n2 := node.New(env.SpaceRootRes.SpaceId, id, "", name, 10, "", env.Owner.Id, env.Lookup) + n1 := node.New(env.SpaceRootRes.SpaceId, id, "", name, 10, "", 
provider.ResourceType_RESOURCE_TYPE_FILE, env.Owner.Id, env.Lookup) + n2 := node.New(env.SpaceRootRes.SpaceId, id, "", name, 10, "", provider.ResourceType_RESOURCE_TYPE_FILE, env.Owner.Id, env.Lookup) Expect(len(n1.BlobID)).To(Equal(36)) Expect(n1.BlobID).ToNot(Equal(n2.BlobID)) @@ -327,6 +327,8 @@ var _ = Describe("Node", func() { }) Expect(err).ToNot(HaveOccurred()) // checking that the path "subpath" is denied properly + subfolder, err = node.ReadNode(env.Ctx, env.Lookup, subfolder.SpaceID, subfolder.ID, false) + Expect(err).ToNot(HaveOccurred()) subfolderActual, denied := subfolder.PermissionSet(env.Ctx) subfolderExpected := ocsconv.NewDeniedRole().CS3ResourcePermissions() Expect(grants.PermissionsEqual(&subfolderActual, subfolderExpected)).To(BeTrue()) diff --git a/pkg/storage/utils/decomposedfs/node/xattrs.go b/pkg/storage/utils/decomposedfs/node/xattrs.go index 3945d1cf63..441690f82d 100644 --- a/pkg/storage/utils/decomposedfs/node/xattrs.go +++ b/pkg/storage/utils/decomposedfs/node/xattrs.go @@ -92,11 +92,11 @@ func (n *Node) Xattrs() (map[string]string, error) { // been cached it is not read from disk again. func (n *Node) Xattr(key string) (string, error) { if n.xattrsCache == nil { - b, err := xattr.Get(n.InternalPath(), key) + b, err := xattrs.Get(n.InternalPath(), key) if err != nil { return "", err } - return string(b), nil + return b, nil } if val, ok := n.xattrsCache[key]; ok { diff --git a/pkg/storage/utils/decomposedfs/options/options.go b/pkg/storage/utils/decomposedfs/options/options.go index 4afe2e38d2..91f1b5c0d1 100644 --- a/pkg/storage/utils/decomposedfs/options/options.go +++ b/pkg/storage/utils/decomposedfs/options/options.go @@ -33,6 +33,10 @@ type Option func(o *Options) // Options defines the available options for this package. type Options struct { + + // the metadata backend to use, currently supports `xattr` or `ini` + MetadataBackend string `mapstructure:"metadata_backend"` + // ocis fs works on top of a dir of uuid nodes Root string `mapstructure:"root"` @@ -97,6 +101,10 @@ func New(m map[string]interface{}) (*Options, error) { return nil, err } + if o.MetadataBackend == "" { + o.MetadataBackend = "xattrs" + } + if o.UserLayout == "" { o.UserLayout = "{{.Id.OpaqueId}}" } diff --git a/pkg/storage/utils/decomposedfs/recycle.go b/pkg/storage/utils/decomposedfs/recycle.go index 5b7af437b2..32e3569614 100644 --- a/pkg/storage/utils/decomposedfs/recycle.go +++ b/pkg/storage/utils/decomposedfs/recycle.go @@ -23,6 +23,7 @@ import ( iofs "io/fs" "os" "path/filepath" + "strconv" "strings" "time" @@ -33,9 +34,9 @@ import ( "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/prefixes" "github.com/cs3org/reva/v2/pkg/storagespace" "github.com/pkg/errors" - "github.com/pkg/xattr" ) // Recycle items are stored inside the node folder and start with the uuid of the deleted node. 
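The `prefixes` package imported throughout these hunks is added elsewhere in this PR and is not part of this excerpt. A minimal sketch of its likely shape, assuming the attribute names and the `user.ocis.` xattr namespace simply move over unchanged from the previous `xattrs.*` constants (only `TypeAttr` appears to be new in this change):

package prefixes

// Shared extended-attribute key constants used by both the xattrs and the ini
// metadata backends. The concrete values below are assumptions carried over
// from the pre-existing xattrs constants; only the package split itself is
// visible in this diff.
const (
	OcisPrefix      = "user.ocis."
	TypeAttr        = OcisPrefix + "type" // persisted resource type, introduced by this change
	ParentidAttr    = OcisPrefix + "parentid"
	NameAttr        = OcisPrefix + "name"
	BlobIDAttr      = OcisPrefix + "blobid"
	BlobsizeAttr    = OcisPrefix + "blobsize"
	TreesizeAttr    = OcisPrefix + "treesize"
	TrashOriginAttr = OcisPrefix + "trash.origin"
	PropagationAttr = OcisPrefix + "propagation"
)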
@@ -80,17 +81,21 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference // build a list of trash items relative to the given trash root and path items := make([]*provider.RecycleItem, 0) - trashRootPath := filepath.Join(fs.getRecycleRoot(ctx, spaceID), lookup.Pathify(key, 4, 2)) - _, timeSuffix, err := readTrashLink(trashRootPath) + trashRootPath := filepath.Join(fs.getRecycleRoot(spaceID), lookup.Pathify(key, 4, 2)) + originalPath, _, timeSuffix, err := readTrashLink(trashRootPath) if err != nil { sublog.Error().Err(err).Str("trashRoot", trashRootPath).Msg("error reading trash link") return nil, err } origin := "" + attrs, err := xattrs.All(originalPath) + if err != nil { + return items, err + } // lookup origin path in extended attributes - if attrBytes, err := xattr.Get(trashRootPath, xattrs.TrashOriginAttr); err == nil { - origin = string(attrBytes) + if attrBytes, ok := attrs[prefixes.TrashOriginAttr]; ok { + origin = attrBytes } else { sublog.Error().Err(err).Str("space", spaceID).Msg("could not read origin path, skipping") return nil, err @@ -107,112 +112,144 @@ func (fs *Decomposedfs) ListRecycle(ctx context.Context, ref *provider.Reference sublog.Error().Err(err).Msg("could not parse time format, ignoring") } - trashItemPath := filepath.Join(trashRootPath, relativePath) - - f, err := os.Open(trashItemPath) - if err != nil { - if errors.Is(err, iofs.ErrNotExist) { - return items, nil - } - return nil, errors.Wrapf(err, "recycle: error opening trashItemPath %s", trashItemPath) - } - defer f.Close() - - if md, err := f.Stat(); err != nil { - return nil, err - } else if !md.IsDir() { + nodeType := node.TypeFromPath(originalPath) + if nodeType != provider.ResourceType_RESOURCE_TYPE_CONTAINER { // this is the case when we want to directly list a file in the trashbin - item, err := fs.createTrashItem(ctx, md, filepath.Join(key, relativePath), deletionTime) + blobsize, err := strconv.ParseInt(attrs[prefixes.BlobsizeAttr], 10, 64) if err != nil { return items, err } - item.Ref = &provider.Reference{ - Path: filepath.Join(origin, relativePath), + item := &provider.RecycleItem{ + Type: nodeType, + Size: uint64(blobsize), + Key: filepath.Join(key, relativePath), + DeletionTime: deletionTime, + Ref: &provider.Reference{ + Path: filepath.Join(origin, relativePath), + }, } items = append(items, item) return items, err } // we have to read the names and stat the path to follow the symlinks - names, err := f.Readdirnames(0) + if err != nil { + return nil, err + } + childrenPath := filepath.Join(originalPath, relativePath) + childrenDir, err := os.Open(childrenPath) + if err != nil { + return nil, err + } + + names, err := childrenDir.Readdirnames(0) if err != nil { return nil, err } for _, name := range names { - md, err := os.Stat(filepath.Join(trashItemPath, name)) + resolvedChildPath, err := filepath.EvalSymlinks(filepath.Join(childrenPath, name)) if err != nil { - sublog.Error().Err(err).Str("name", name).Msg("could not stat, skipping") + sublog.Error().Err(err).Str("name", name).Msg("could not resolve symlink, skipping") continue } - if item, err := fs.createTrashItem(ctx, md, filepath.Join(key, relativePath, name), deletionTime); err == nil { - item.Ref = &provider.Reference{ - Path: filepath.Join(origin, relativePath, name), + + size := int64(0) + + nodeType = node.TypeFromPath(resolvedChildPath) + switch nodeType { + case provider.ResourceType_RESOURCE_TYPE_FILE: + size, err = node.ReadBlobSizeAttr(resolvedChildPath) + if err != nil { + 
sublog.Error().Err(err).Str("name", name).Msg("invalid blob size, skipping") + continue + } + case provider.ResourceType_RESOURCE_TYPE_CONTAINER: + attr, err := xattrs.Get(resolvedChildPath, prefixes.TreesizeAttr) + if err != nil { + sublog.Error().Err(err).Str("name", name).Msg("invalid tree size, skipping") + continue + } + size, err = strconv.ParseInt(attr, 10, 64) + if err != nil { + sublog.Error().Err(err).Str("name", name).Msg("invalid tree size, skipping") + continue } - items = append(items, item) + case provider.ResourceType_RESOURCE_TYPE_INVALID: + sublog.Error().Err(err).Str("name", name).Str("resolvedChildPath", resolvedChildPath).Msg("invalid node type, skipping") + continue } - } - return items, nil -} - -func (fs *Decomposedfs) createTrashItem(ctx context.Context, md iofs.FileInfo, key string, deletionTime *types.Timestamp) (*provider.RecycleItem, error) { - item := &provider.RecycleItem{ - Type: getResourceType(md.IsDir()), - Size: uint64(md.Size()), - Key: key, - DeletionTime: deletionTime, + item := &provider.RecycleItem{ + Type: nodeType, + Size: uint64(size), + Key: filepath.Join(key, relativePath, name), + DeletionTime: deletionTime, + Ref: &provider.Reference{ + Path: filepath.Join(origin, relativePath, name), + }, + } + items = append(items, item) } - - // TODO filter results by permission ... on the original parent? or the trashed node? - // if it were on the original parent it would be possible to see files that were trashed before the current user got access - // so -> check the trash node itself - // hmm listing trash currently lists the current users trash or the 'root' trash. from ocs only the home storage is queried for trash items. - // for now we can only really check if the current user is the owner - return item, nil + return items, nil } -// readTrashLink returns nodeID and timestamp -func readTrashLink(path string) (string, string, error) { +// readTrashLink returns path, nodeID and timestamp +func readTrashLink(path string) (string, string, string, error) { link, err := os.Readlink(path) if err != nil { - return "", "", err + return "", "", "", err + } + resolved, err := filepath.EvalSymlinks(path) + if err != nil { + return "", "", "", err } // ../../../../../nodes/e5/6c/75/a8/-d235-4cbb-8b4e-48b6fd0f2094.T.2022-02-16T14:38:11.769917408Z // TODO use filepath.Separator to support windows link = strings.ReplaceAll(link, "/", "") // ..........nodese56c75a8-d235-4cbb-8b4e-48b6fd0f2094.T.2022-02-16T14:38:11.769917408Z if link[0:15] != "..........nodes" || link[51:54] != node.TrashIDDelimiter { - return "", "", errtypes.InternalError("malformed trash link") + return "", "", "", errtypes.InternalError("malformed trash link") } - return link[15:51], link[54:], nil + return resolved, link[15:51], link[54:], nil } func (fs *Decomposedfs) listTrashRoot(ctx context.Context, spaceID string) ([]*provider.RecycleItem, error) { log := appctx.GetLogger(ctx) items := make([]*provider.RecycleItem, 0) - trashRoot := fs.getRecycleRoot(ctx, spaceID) + trashRoot := fs.getRecycleRoot(spaceID) matches, err := filepath.Glob(trashRoot + "/*/*/*/*/*") if err != nil { return nil, err } for _, itemPath := range matches { - nodeID, timeSuffix, err := readTrashLink(itemPath) + nodePath, nodeID, timeSuffix, err := readTrashLink(itemPath) if err != nil { log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Msg("error reading trash link, skipping") continue } - nodePath := fs.lu.InternalPath(spaceID, nodeID) + node.TrashIDDelimiter + timeSuffix md, err := 
os.Stat(nodePath) if err != nil { log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node_path", nodePath).Msg("could not stat trash item, skipping") continue } + attrs, err := xattrs.All(nodePath) + if err != nil { + log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node_path", nodePath).Msg("could not get extended attributes, skipping") + continue + } + + nodeType := node.TypeFromPath(nodePath) + if nodeType == provider.ResourceType_RESOURCE_TYPE_INVALID { + log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node_path", nodePath).Msg("invalid node type, skipping") + continue + } + item := &provider.RecycleItem{ - Type: getResourceType(md.IsDir()), + Type: nodeType, Size: uint64(md.Size()), Key: nodeID, } @@ -226,11 +263,10 @@ func (fs *Decomposedfs) listTrashRoot(ctx context.Context, spaceID string) ([]*p } // lookup origin path in extended attributes - var attrBytes []byte - if attrBytes, err = xattr.Get(nodePath, xattrs.TrashOriginAttr); err == nil { - item.Ref = &provider.Reference{Path: string(attrBytes)} + if attr, ok := attrs[prefixes.TrashOriginAttr]; ok { + item.Ref = &provider.Reference{Path: attr} } else { - log.Error().Err(err).Str("trashRoot", trashRoot).Str("item", itemPath).Str("node", nodeID).Str("dtime", timeSuffix).Msg("could not read origin path, skipping") + log.Error().Str("trashRoot", trashRoot).Str("item", itemPath).Str("node", nodeID).Str("dtime", timeSuffix).Msg("could not read origin path, skipping") continue } // TODO filter results by permission ... on the original parent? or the trashed node? @@ -344,16 +380,9 @@ func (fs *Decomposedfs) EmptyRecycle(ctx context.Context, ref *provider.Referenc } // TODO what permission should we check? we could check the root node of the user? or the owner permissions on his home root node? // The current impl will wipe your own trash. 
or when no user provided the trash of 'root' - return os.RemoveAll(fs.getRecycleRoot(ctx, ref.ResourceId.SpaceId)) -} - -func getResourceType(isDir bool) provider.ResourceType { - if isDir { - return provider.ResourceType_RESOURCE_TYPE_CONTAINER - } - return provider.ResourceType_RESOURCE_TYPE_FILE + return os.RemoveAll(fs.getRecycleRoot(ref.ResourceId.SpaceId)) } -func (fs *Decomposedfs) getRecycleRoot(ctx context.Context, spaceID string) string { - return filepath.Join(fs.o.Root, "spaces", lookup.Pathify(spaceID, 1, 2), "trash") +func (fs *Decomposedfs) getRecycleRoot(spaceID string) string { + return filepath.Join(fs.getSpaceRoot(spaceID), "trash") } diff --git a/pkg/storage/utils/decomposedfs/revisions.go b/pkg/storage/utils/decomposedfs/revisions.go index e97b970392..dbe7eab9bc 100644 --- a/pkg/storage/utils/decomposedfs/revisions.go +++ b/pkg/storage/utils/decomposedfs/revisions.go @@ -31,6 +31,7 @@ import ( "github.com/cs3org/reva/v2/pkg/errtypes" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/prefixes" "github.com/cs3org/reva/v2/pkg/storagespace" "github.com/pkg/errors" ) @@ -70,6 +71,10 @@ func (fs *Decomposedfs) ListRevisions(ctx context.Context, ref *provider.Referen np := n.InternalPath() if items, err := filepath.Glob(np + node.RevisionIDDelimiter + "*"); err == nil { for i := range items { + if xattrs.IsMetaFile(items[i]) { + continue + } + if fi, err := os.Stat(items[i]); err == nil { parts := strings.SplitN(fi.Name(), node.RevisionIDDelimiter, 2) if len(parts) != 2 { @@ -211,24 +216,34 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer newRevisionPath := fs.lu.InternalPath(spaceID, kp[0]+node.RevisionIDDelimiter+fi.ModTime().UTC().Format(time.RFC3339Nano)) // touch new revision - if file, err := os.Create(newRevisionPath); err != nil { - return err - } else if err := file.Close(); err != nil { + if _, err := os.Create(newRevisionPath); err != nil { return err } + if xattrs.UsesExternalMetadataFile() { + if _, err := os.Create(xattrs.MetadataPath(newRevisionPath)); err != nil { + _ = os.Remove(newRevisionPath) + return err + } + } defer func() { if returnErr != nil { if err := os.Remove(newRevisionPath); err != nil { log.Error().Err(err).Str("revision", filepath.Base(newRevisionPath)).Msg("could not clean up revision node") } + if xattrs.UsesExternalMetadataFile() { + if err := os.Remove(xattrs.MetadataPath(newRevisionPath)); err != nil { + log.Error().Err(err).Str("revision", filepath.Base(newRevisionPath)).Msg("could not clean up revision node") + } + } } }() // copy blob metadata from node to new revision node err = xattrs.CopyMetadata(nodePath, newRevisionPath, func(attributeName string) bool { - return strings.HasPrefix(attributeName, xattrs.ChecksumPrefix) || // for checksums - attributeName == xattrs.BlobIDAttr || - attributeName == xattrs.BlobsizeAttr + return strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || // for checksums + attributeName == prefixes.TypeAttr || + attributeName == prefixes.BlobIDAttr || + attributeName == prefixes.BlobsizeAttr }) if err != nil { return errtypes.InternalError("failed to copy blob xattrs to version node") @@ -244,15 +259,16 @@ func (fs *Decomposedfs) RestoreRevision(ctx context.Context, ref *provider.Refer // copy blob metadata from restored revision to node restoredRevisionPath := fs.lu.InternalPath(spaceID, revisionKey) err = 
xattrs.CopyMetadata(restoredRevisionPath, nodePath, func(attributeName string) bool { - return strings.HasPrefix(attributeName, xattrs.ChecksumPrefix) || - attributeName == xattrs.BlobIDAttr || - attributeName == xattrs.BlobsizeAttr + return strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || + attributeName == prefixes.TypeAttr || + attributeName == prefixes.BlobIDAttr || + attributeName == prefixes.BlobsizeAttr }) if err != nil { return errtypes.InternalError("failed to copy blob xattrs to old revision to node") } - revisionSize, err := xattrs.GetInt64(restoredRevisionPath, xattrs.BlobsizeAttr) + revisionSize, err := xattrs.GetInt64(restoredRevisionPath, prefixes.BlobsizeAttr) if err != nil { return errtypes.InternalError("failed to read blob size xattr from old revision") } diff --git a/pkg/storage/utils/decomposedfs/spaces.go b/pkg/storage/utils/decomposedfs/spaces.go index bd56956f39..f9b1bddf6b 100644 --- a/pkg/storage/utils/decomposedfs/spaces.go +++ b/pkg/storage/utils/decomposedfs/spaces.go @@ -41,6 +41,7 @@ import ( "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/prefixes" "github.com/cs3org/reva/v2/pkg/storage/utils/filelocks" "github.com/cs3org/reva/v2/pkg/storage/utils/templates" "github.com/cs3org/reva/v2/pkg/storagespace" @@ -93,9 +94,17 @@ func (fs *Decomposedfs) CreateStorageSpace(ctx context.Context, req *provider.Cr } // create a directory node + root.SetType(provider.ResourceType_RESOURCE_TYPE_CONTAINER) rootPath := root.InternalPath() - if err = os.MkdirAll(rootPath, 0700); err != nil { - return nil, errors.Wrap(err, "decomposedfs: error creating node") + + if err := os.MkdirAll(rootPath, 0700); err != nil { + return nil, errors.Wrap(err, "Decomposedfs: error creating node") + } + if xattrs.UsesExternalMetadataFile() { + _, err = os.Create(xattrs.MetadataPath(rootPath)) + if err != nil { + return nil, errors.Wrap(err, "Decomposedfs: error creating metadata file") + } } if err := root.WriteAllNodeMetadata(); err != nil { @@ -120,25 +129,25 @@ func (fs *Decomposedfs) CreateStorageSpace(ctx context.Context, req *provider.Cr // always enable propagation on the storage space root // mark the space root node as the end of propagation - metadata[xattrs.PropagationAttr] = "1" - metadata[xattrs.NameAttr] = req.Name - metadata[xattrs.SpaceNameAttr] = req.Name + metadata[prefixes.PropagationAttr] = "1" + metadata[prefixes.NameAttr] = req.Name + metadata[prefixes.SpaceNameAttr] = req.Name if req.Type != "" { - metadata[xattrs.SpaceTypeAttr] = req.Type + metadata[prefixes.SpaceTypeAttr] = req.Type } if q := req.GetQuota(); q != nil { // set default space quota - metadata[xattrs.QuotaAttr] = strconv.FormatUint(q.QuotaMaxBytes, 10) + metadata[prefixes.QuotaAttr] = strconv.FormatUint(q.QuotaMaxBytes, 10) } if description != "" { - metadata[xattrs.SpaceDescriptionAttr] = description + metadata[prefixes.SpaceDescriptionAttr] = description } if alias != "" { - metadata[xattrs.SpaceAliasAttr] = alias + metadata[prefixes.SpaceAliasAttr] = alias } if err := root.SetXattrs(metadata); err != nil { @@ -424,21 +433,21 @@ func (fs *Decomposedfs) UpdateStorageSpace(ctx context.Context, req *provider.Up metadata := make(map[string]string, 5) if space.Name != "" { - metadata[xattrs.NameAttr] = space.Name - metadata[xattrs.SpaceNameAttr] = space.Name + metadata[prefixes.NameAttr] 
= space.Name + metadata[prefixes.SpaceNameAttr] = space.Name } if space.Quota != nil { - metadata[xattrs.QuotaAttr] = strconv.FormatUint(space.Quota.QuotaMaxBytes, 10) + metadata[prefixes.QuotaAttr] = strconv.FormatUint(space.Quota.QuotaMaxBytes, 10) } // TODO also return values which are not in the request if space.Opaque != nil { if description, ok := space.Opaque.Map["description"]; ok { - metadata[xattrs.SpaceDescriptionAttr] = string(description.Value) + metadata[prefixes.SpaceDescriptionAttr] = string(description.Value) } if alias := utils.ReadPlainFromOpaque(space.Opaque, "spaceAlias"); alias != "" { - metadata[xattrs.SpaceAliasAttr] = alias + metadata[prefixes.SpaceAliasAttr] = alias } if image := utils.ReadPlainFromOpaque(space.Opaque, "image"); image != "" { imageID, err := storagespace.ParseID(image) @@ -447,7 +456,7 @@ func (fs *Decomposedfs) UpdateStorageSpace(ctx context.Context, req *provider.Up Status: &v1beta11.Status{Code: v1beta11.Code_CODE_NOT_FOUND, Message: "decomposedFS: space image resource not found"}, }, nil } - metadata[xattrs.SpaceImageAttr] = imageID.OpaqueId + metadata[prefixes.SpaceImageAttr] = imageID.OpaqueId } if readme := utils.ReadPlainFromOpaque(space.Opaque, "readme"); readme != "" { readmeID, err := storagespace.ParseID(readme) @@ -456,7 +465,7 @@ func (fs *Decomposedfs) UpdateStorageSpace(ctx context.Context, req *provider.Up Status: &v1beta11.Status{Code: v1beta11.Code_CODE_NOT_FOUND, Message: "decomposedFS: space readme resource not found"}, }, nil } - metadata[xattrs.SpaceReadmeAttr] = readmeID.OpaqueId + metadata[prefixes.SpaceReadmeAttr] = readmeID.OpaqueId } } @@ -490,9 +499,9 @@ func (fs *Decomposedfs) UpdateStorageSpace(ctx context.Context, req *provider.Up if !IsManager(sp) { // We are not a space manager. We need to check for additional permissions. - k := []string{xattrs.NameAttr, xattrs.SpaceDescriptionAttr} + k := []string{prefixes.NameAttr, prefixes.SpaceDescriptionAttr} if !IsEditor(sp) { - k = append(k, xattrs.SpaceReadmeAttr, xattrs.SpaceAliasAttr, xattrs.SpaceImageAttr) + k = append(k, prefixes.SpaceReadmeAttr, prefixes.SpaceAliasAttr, prefixes.SpaceImageAttr) } if mapHasKey(metadata, k...) 
&& !fs.p.ManageSpaceProperties(ctx, spaceID) { @@ -508,7 +517,7 @@ func (fs *Decomposedfs) UpdateStorageSpace(ctx context.Context, req *provider.Up } } - if mapHasKey(metadata, xattrs.QuotaAttr) && !fs.p.SetSpaceQuota(ctx, spaceID) { + if mapHasKey(metadata, prefixes.QuotaAttr) && !fs.p.SetSpaceQuota(ctx, spaceID) { return &provider.UpdateStorageSpaceResponse{ Status: &v1beta11.Status{Code: v1beta11.Code_CODE_PERMISSION_DENIED}, }, nil @@ -555,7 +564,7 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De return err } - st, err := n.SpaceRoot.Xattr(xattrs.SpaceTypeAttr) + st, err := n.SpaceRoot.Xattr(prefixes.SpaceTypeAttr) if err != nil { return errtypes.InternalError(fmt.Sprintf("space %s does not have a spacetype, possible corrupt decompsedfs", n.ID)) } @@ -585,7 +594,7 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De return errtypes.NewErrtypeFromStatus(status.NewInvalid(ctx, "can't purge enabled space")) } - spaceType, err := n.Xattr(xattrs.SpaceTypeAttr) + spaceType, err := n.Xattr(prefixes.SpaceTypeAttr) if err != nil { return err } @@ -596,7 +605,7 @@ func (fs *Decomposedfs) DeleteStorageSpace(ctx context.Context, req *provider.De } // remove space metadata - if err := os.RemoveAll(filepath.Join(fs.o.Root, "spaces", lookup.Pathify(spaceID, 1, 2))); err != nil { + if err := os.RemoveAll(fs.getSpaceRoot(spaceID)); err != nil { return err } @@ -691,7 +700,7 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, var err error // TODO apply more filters var sname string - if sname, err = n.SpaceRoot.Xattr(xattrs.SpaceNameAttr); err != nil { + if sname, err = n.SpaceRoot.Xattr(prefixes.SpaceNameAttr); err != nil { // FIXME: Is that a severe problem? appctx.GetLogger(ctx).Debug().Err(err).Msg("space does not have a name attribute") } @@ -796,7 +805,7 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, // Mtime is set either as node.tmtime or as fi.mtime below } - if space.SpaceType, err = n.SpaceRoot.Xattr(xattrs.SpaceTypeAttr); err != nil { + if space.SpaceType, err = n.SpaceRoot.Xattr(prefixes.SpaceTypeAttr); err != nil { appctx.GetLogger(ctx).Debug().Err(err).Msg("space does not have a type attribute") } @@ -845,7 +854,7 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, } // quota - quotaAttr, ok := spaceAttributes[xattrs.QuotaAttr] + quotaAttr, ok := spaceAttributes[prefixes.QuotaAttr] if ok { // make sure we have a proper signed int // we use the same magic numbers to indicate: @@ -861,23 +870,23 @@ func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, return nil, err } } - spaceImage, ok := spaceAttributes[xattrs.SpaceImageAttr] + spaceImage, ok := spaceAttributes[prefixes.SpaceImageAttr] if ok { space.Opaque = utils.AppendPlainToOpaque(space.Opaque, "image", storagespace.FormatResourceID( provider.ResourceId{StorageId: space.Root.StorageId, SpaceId: space.Root.SpaceId, OpaqueId: spaceImage}, )) } - spaceDescription, ok := spaceAttributes[xattrs.SpaceDescriptionAttr] + spaceDescription, ok := spaceAttributes[prefixes.SpaceDescriptionAttr] if ok { space.Opaque = utils.AppendPlainToOpaque(space.Opaque, "description", spaceDescription) } - spaceReadme, ok := spaceAttributes[xattrs.SpaceReadmeAttr] + spaceReadme, ok := spaceAttributes[prefixes.SpaceReadmeAttr] if ok { space.Opaque = utils.AppendPlainToOpaque(space.Opaque, "readme", storagespace.FormatResourceID( provider.ResourceId{StorageId: 
space.Root.StorageId, SpaceId: space.Root.SpaceId, OpaqueId: spaceReadme}, )) } - spaceAlias, ok := spaceAttributes[xattrs.SpaceAliasAttr] + spaceAlias, ok := spaceAttributes[prefixes.SpaceAliasAttr] if ok { space.Opaque = utils.AppendPlainToOpaque(space.Opaque, "spaceAlias", spaceAlias) } @@ -903,3 +912,7 @@ func isGrantExpired(g *provider.Grant) bool { } return time.Now().After(time.Unix(int64(g.Expiration.Seconds), int64(g.Expiration.Nanos))) } + +func (fs *Decomposedfs) getSpaceRoot(spaceID string) string { + return filepath.Join(fs.o.Root, "spaces", lookup.Pathify(spaceID, 1, 2)) +} diff --git a/pkg/storage/utils/decomposedfs/testhelpers/helpers.go b/pkg/storage/utils/decomposedfs/testhelpers/helpers.go index ba18e77452..9a69a3e524 100644 --- a/pkg/storage/utils/decomposedfs/testhelpers/helpers.go +++ b/pkg/storage/utils/decomposedfs/testhelpers/helpers.go @@ -25,9 +25,9 @@ import ( "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/prefixes" "github.com/cs3org/reva/v2/pkg/storagespace" "github.com/google/uuid" - "github.com/pkg/xattr" "github.com/stretchr/testify/mock" userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" @@ -207,6 +207,7 @@ func (t *TestEnv) CreateTestFile(name, blobID, parentID, spaceID string, blobSiz name, blobSize, blobID, + providerv1beta1.ResourceType_RESOURCE_TYPE_FILE, nil, t.Lookup, ) @@ -218,12 +219,18 @@ func (t *TestEnv) CreateTestFile(name, blobID, parentID, spaceID string, blobSiz if err != nil { return nil, err } + if xattrs.UsesExternalMetadataFile() { + _, err = os.OpenFile(xattrs.MetadataPath(nodePath), os.O_CREATE, 0700) + if err != nil { + return nil, err + } + } err = n.WriteAllNodeMetadata() if err != nil { return nil, err } // Link in parent - childNameLink := filepath.Join(n.ParentInternalPath(), n.Name) + childNameLink := filepath.Join(n.ParentPath(), n.Name) err = os.Symlink("../../../../../"+lookup.Pathify(n.ID, 4, 2), childNameLink) if err != nil { return nil, err @@ -276,7 +283,7 @@ func (t *TestEnv) CreateTestStorageSpace(typ string, quota *providerv1beta1.Quot if err != nil { return nil, err } - if err = xattr.Set(h.InternalPath(), xattrs.SpaceNameAttr, []byte("username")); err != nil { + if err = xattrs.Set(h.InternalPath(), prefixes.SpaceNameAttr, "username"); err != nil { return nil, err } diff --git a/pkg/storage/utils/decomposedfs/tree/tree.go b/pkg/storage/utils/decomposedfs/tree/tree.go index 331945bac6..26b911799b 100644 --- a/pkg/storage/utils/decomposedfs/tree/tree.go +++ b/pkg/storage/utils/decomposedfs/tree/tree.go @@ -27,6 +27,7 @@ import ( iofs "io/fs" "os" "path/filepath" + "regexp" "strconv" "strings" "time" @@ -38,6 +39,7 @@ import ( "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/prefixes" "github.com/cs3org/reva/v2/pkg/storage/utils/filelocks" "github.com/cs3org/reva/v2/pkg/utils" "github.com/google/uuid" @@ -227,7 +229,7 @@ func (t *Tree) linkSpaceNode(spaceType, spaceID string) { // isRootNode checks if a node is a space root func isRootNode(nodePath string) bool { - attr, err := xattrs.Get(nodePath, xattrs.ParentidAttr) + attr, err := xattrs.Get(nodePath, prefixes.ParentidAttr) return err == nil && attr == node.RootID } @@ -253,6 +255,7 @@ func (t 
*Tree) TouchFile(ctx context.Context, n *node.Node) error { if n.ID == "" { n.ID = uuid.New().String() } + n.SetType(provider.ResourceType_RESOURCE_TYPE_FILE) nodePath := n.InternalPath() if err := os.MkdirAll(filepath.Dir(nodePath), 0700); err != nil { @@ -262,6 +265,12 @@ func (t *Tree) TouchFile(ctx context.Context, n *node.Node) error { if err != nil { return errors.Wrap(err, "Decomposedfs: error creating node") } + if xattrs.UsesExternalMetadataFile() { + _, err = os.Create(xattrs.MetadataPath(nodePath)) + if err != nil { + return errors.Wrap(err, "Decomposedfs: error creating node") + } + } err = n.WriteAllNodeMetadata() if err != nil { @@ -269,7 +278,7 @@ func (t *Tree) TouchFile(ctx context.Context, n *node.Node) error { } // link child name to parent if it is new - childNameLink := filepath.Join(n.ParentInternalPath(), n.Name) + childNameLink := filepath.Join(n.ParentPath(), n.Name) var link string link, err = os.Readlink(childNameLink) if err == nil && link != "../"+n.ID { @@ -289,17 +298,17 @@ func (t *Tree) TouchFile(ctx context.Context, n *node.Node) error { // CreateDir creates a new directory entry in the tree func (t *Tree) CreateDir(ctx context.Context, n *node.Node) (err error) { - if n.Exists { return errtypes.AlreadyExists(n.ID) // path? } // create a directory node + n.SetType(provider.ResourceType_RESOURCE_TYPE_CONTAINER) if n.ID == "" { n.ID = uuid.New().String() } - err = t.createNode(n) + err = t.createDirNode(n) if err != nil { return } @@ -310,7 +319,7 @@ func (t *Tree) CreateDir(ctx context.Context, n *node.Node) (err error) { // make child appear in listings relativeNodePath := filepath.Join("../../../../../", lookup.Pathify(n.ID, 4, 2)) - err = os.Symlink(relativeNodePath, filepath.Join(n.ParentInternalPath(), n.Name)) + err = os.Symlink(relativeNodePath, filepath.Join(n.ParentPath(), n.Name)) if err != nil { // no better way to check unfortunately if !strings.Contains(err.Error(), "file exists") { @@ -353,7 +362,7 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) if oldNode.ParentID == newNode.ParentID { // parentPath := t.lookup.InternalPath(oldNode.SpaceID, oldNode.ParentID) - parentPath := oldNode.ParentInternalPath() + parentPath := oldNode.ParentPath() // rename child err = os.Rename( @@ -365,7 +374,7 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) } // update name attribute - if err := oldNode.SetXattr(xattrs.NameAttr, newNode.Name); err != nil { + if err := oldNode.SetXattr(prefixes.NameAttr, newNode.Name); err != nil { return errors.Wrap(err, "Decomposedfs: could not set name attribute") } @@ -377,18 +386,18 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) // rename child err = os.Rename( - filepath.Join(oldNode.ParentInternalPath(), oldNode.Name), - filepath.Join(newNode.ParentInternalPath(), newNode.Name), + filepath.Join(oldNode.ParentPath(), oldNode.Name), + filepath.Join(newNode.ParentPath(), newNode.Name), ) if err != nil { return errors.Wrap(err, "Decomposedfs: could not move child") } // update target parentid and name - if err := oldNode.SetXattr(xattrs.ParentidAttr, newNode.ParentID); err != nil { + if err := oldNode.SetXattr(prefixes.ParentidAttr, newNode.ParentID); err != nil { return errors.Wrap(err, "Decomposedfs: could not set parentid attribute") } - if err := oldNode.SetXattr(xattrs.NameAttr, newNode.Name); err != nil { + if err := oldNode.SetXattr(prefixes.NameAttr, newNode.Name); err != nil { return errors.Wrap(err, 
"Decomposedfs: could not set name attribute") } @@ -474,7 +483,7 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { deletingSharedResource := ctx.Value(appctx.DeletingSharedResource) if deletingSharedResource != nil && deletingSharedResource.(bool) { - src := filepath.Join(n.ParentInternalPath(), n.Name) + src := filepath.Join(n.ParentPath(), n.Name) return os.Remove(src) } @@ -486,7 +495,7 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { // set origin location in metadata nodePath := n.InternalPath() - if err := n.SetXattr(xattrs.TrashOriginAttr, origin); err != nil { + if err := n.SetXattr(prefixes.TrashOriginAttr, origin); err != nil { return err } @@ -507,7 +516,7 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { trashLink := filepath.Join(t.root, "spaces", lookup.Pathify(n.SpaceRoot.ID, 1, 2), "trash", lookup.Pathify(n.ID, 4, 2)) if err := os.MkdirAll(filepath.Dir(trashLink), 0700); err != nil { // Roll back changes - _ = n.RemoveXattr(xattrs.TrashOriginAttr) + _ = n.RemoveXattr(prefixes.TrashOriginAttr) return err } @@ -520,7 +529,7 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { err = os.Symlink("../../../../../nodes/"+lookup.Pathify(n.ID, 4, 2)+node.TrashIDDelimiter+deletionTime, trashLink) if err != nil { // Roll back changes - _ = n.RemoveXattr(xattrs.TrashOriginAttr) + _ = n.RemoveXattr(prefixes.TrashOriginAttr) return } @@ -533,22 +542,29 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { // To roll back changes // TODO remove symlink // Roll back changes - _ = n.RemoveXattr(xattrs.TrashOriginAttr) + _ = n.RemoveXattr(prefixes.TrashOriginAttr) return } + if xattrs.UsesExternalMetadataFile() { + err = os.Rename(xattrs.MetadataPath(nodePath), xattrs.MetadataPath(trashPath)) + if err != nil { + _ = n.RemoveXattr(prefixes.TrashOriginAttr) + _ = os.Rename(trashPath, nodePath) + return + } + } // Remove lock file if it exists _ = os.Remove(n.LockFilePath()) // finally remove the entry from the parent dir - src := filepath.Join(n.ParentInternalPath(), n.Name) - err = os.Remove(src) + err = os.Remove(filepath.Join(n.ParentPath(), n.Name)) if err != nil { // To roll back changes // TODO revert the rename // TODO remove symlink // Roll back changes - _ = n.RemoveXattr(xattrs.TrashOriginAttr) + _ = n.RemoveXattr(prefixes.TrashOriginAttr) return } @@ -589,7 +605,7 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa } // add the entry for the parent dir - err = os.Symlink("../../../../../"+lookup.Pathify(recycleNode.ID, 4, 2), filepath.Join(targetNode.ParentInternalPath(), targetNode.Name)) + err = os.Symlink("../../../../../"+lookup.Pathify(recycleNode.ID, 4, 2), filepath.Join(targetNode.ParentPath(), targetNode.Name)) if err != nil { return err } @@ -603,23 +619,37 @@ func (t *Tree) RestoreRecycleItemFunc(ctx context.Context, spaceid, key, trashPa if err != nil { return err } + if xattrs.UsesExternalMetadataFile() { + err = os.Rename(xattrs.MetadataPath(deletedNodePath), xattrs.MetadataPath(nodePath)) + if err != nil { + return err + } + } } targetNode.Exists = true // update name attribute - if err := recycleNode.SetXattr(xattrs.NameAttr, targetNode.Name); err != nil { + if err := recycleNode.SetXattr(prefixes.NameAttr, targetNode.Name); err != nil { return errors.Wrap(err, "Decomposedfs: could not set name attribute") } // set ParentidAttr to restorePath's node parent id if trashPath != "" { - if err := 
recycleNode.SetXattr(xattrs.ParentidAttr, targetNode.ParentID); err != nil { + if err := recycleNode.SetXattr(prefixes.ParentidAttr, targetNode.ParentID); err != nil { return errors.Wrap(err, "Decomposedfs: could not set name attribute") } } // delete item link in trash - if err = os.Remove(trashItem); err != nil { + deletePath := trashItem + if trashPath != "" && trashPath != "/" { + resolvedTrashRoot, err := filepath.EvalSymlinks(trashItem) + if err != nil { + return errors.Wrap(err, "Decomposedfs: could not resolve trash root") + } + deletePath = filepath.Join(resolvedTrashRoot, trashPath) + } + if err = os.Remove(deletePath); err != nil { log.Error().Err(err).Str("trashItem", trashItem).Msg("error deleting trash item") } @@ -645,38 +675,23 @@ func (t *Tree) PurgeRecycleItemFunc(ctx context.Context, spaceid, key string, pa return nil, nil, err } - // only the root node is trashed, the rest is still in normal file system - children, err := os.ReadDir(deletedNodePath) - var nodes []*node.Node - for _, c := range children { - n, _, _, _, err := t.readRecycleItem(ctx, spaceid, key, filepath.Join(path, c.Name())) - if err != nil { - return nil, nil, err - } - nodes, err = appendChildren(ctx, n, nodes) - if err != nil { - return nil, nil, err - } - } - fn := func() error { if err := t.removeNode(deletedNodePath, rn); err != nil { return err } // delete item link in trash - if err = os.Remove(trashItem); err != nil { - log.Error().Err(err).Str("trashItem", trashItem).Msg("error deleting trash item") - return err - } - - // delete children - for i := len(nodes) - 1; i >= 0; i-- { - n := nodes[i] - if err := t.removeNode(n.InternalPath(), n); err != nil { - return err + deletePath := trashItem + if path != "" && path != "/" { + resolvedTrashRoot, err := filepath.EvalSymlinks(trashItem) + if err != nil { + return errors.Wrap(err, "Decomposedfs: could not resolve trash root") } - + deletePath = filepath.Join(resolvedTrashRoot, path) + } + if err = os.Remove(deletePath); err != nil { + log.Error().Err(err).Str("deletePath", deletePath).Msg("error deleting trash item") + return err } return nil @@ -688,14 +703,20 @@ func (t *Tree) PurgeRecycleItemFunc(ctx context.Context, spaceid, key string, pa func (t *Tree) removeNode(path string, n *node.Node) error { // delete the actual node if err := utils.RemoveItem(path); err != nil { - log.Error().Err(err).Str("path", path).Msg("error node") + log.Error().Err(err).Str("path", path).Msg("error purging node") return err } + if xattrs.UsesExternalMetadataFile() { + if err := utils.RemoveItem(xattrs.MetadataPath(path)); err != nil { + log.Error().Err(err).Str("path", xattrs.MetadataPath(path)).Msg("error purging node metadata") + return err + } + } // delete blob from blobstore if n.BlobID != "" { if err := t.DeleteBlob(n); err != nil { - log.Error().Err(err).Str("blobID", n.BlobID).Msg("error deleting nodes blob") + log.Error().Err(err).Str("blobID", n.BlobID).Msg("error purging nodes blob") return err } } @@ -707,6 +728,10 @@ func (t *Tree) removeNode(path string, n *node.Node) error { return err } for _, rev := range revs { + if xattrs.IsMetaFile(rev) { + continue + } + bID, err := node.ReadBlobIDAttr(rev) if err != nil { log.Error().Err(err).Str("revision", rev).Msg("error reading blobid attribute") @@ -757,7 +782,7 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err // TODO none, sync and async? 
if !n.HasPropagation() { - sublog.Debug().Str("attr", xattrs.PropagationAttr).Msg("propagation attribute not set or unreadable, not propagating") + sublog.Debug().Str("attr", prefixes.PropagationAttr).Msg("propagation attribute not set or unreadable, not propagating") // if the attribute is not set treat it as false / none / no propagation return nil } @@ -839,8 +864,8 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err } } - // update the tree time of the node - if err = n.SetXattrWithLock(xattrs.TreesizeAttr, strconv.FormatUint(newSize, 10), nodeLock); err != nil { + // update the tree size of the node + if err = n.SetXattrWithLock(prefixes.TreesizeAttr, strconv.FormatUint(newSize, 10), nodeLock); err != nil { return err } @@ -861,50 +886,46 @@ func (t *Tree) Propagate(ctx context.Context, n *node.Node, sizeDiff int64) (err return } -func calculateTreeSize(ctx context.Context, nodePath string) (uint64, error) { +func calculateTreeSize(ctx context.Context, childrenPath string) (uint64, error) { var size uint64 - f, err := os.Open(nodePath) + f, err := os.Open(childrenPath) if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Msg("could not open dir") + appctx.GetLogger(ctx).Error().Err(err).Str("childrenPath", childrenPath).Msg("could not open dir") return 0, err } defer f.Close() names, err := f.Readdirnames(0) if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("nodepath", nodePath).Msg("could not read dirnames") + appctx.GetLogger(ctx).Error().Err(err).Str("childrenPath", childrenPath).Msg("could not read dirnames") return 0, err } for i := range names { - cPath := filepath.Join(nodePath, names[i]) - info, err := os.Stat(cPath) + cPath := filepath.Join(childrenPath, names[i]) + resolvedPath, err := filepath.EvalSymlinks(cPath) if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("childpath", cPath).Msg("could not stat child entry") + appctx.GetLogger(ctx).Error().Err(err).Str("childpath", cPath).Msg("could not resolve child entry symlink") continue // continue after an error } - if !info.IsDir() { - blobSize, err := node.ReadBlobSizeAttr(cPath) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("childpath", cPath).Msg("could not read blobSize xattr") - continue // continue after an error - } - size += uint64(blobSize) + + // raw read of the attributes for performance reasons + attribs, err := xattrs.All(resolvedPath) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("childpath", cPath).Msg("could not read attributes of child entry") + continue // continue after an error + } + sizeAttr := "" + if attribs[prefixes.TypeAttr] == strconv.FormatUint(uint64(provider.ResourceType_RESOURCE_TYPE_FILE), 10) { + sizeAttr = attribs[prefixes.BlobsizeAttr] } else { - // read from attr - var b string - // xattrs.Get will follow the symlink - if b, err = xattrs.Get(cPath, xattrs.TreesizeAttr); err != nil { - // TODO recursively descend and recalculate treesize - continue // continue after an error - } - csize, err := strconv.ParseUint(b, 10, 64) - if err != nil { - // TODO recursively descend and recalculate treesize - continue // continue after an error - } - size += csize + sizeAttr = attribs[prefixes.TreesizeAttr] } + csize, err := strconv.ParseInt(sizeAttr, 10, 64) + if err != nil { + return 0, errors.Wrapf(err, "invalid blobsize xattr format") + } + size += uint64(csize) } return size, err } @@ -936,47 +957,23 @@ func (t *Tree) DeleteBlob(node *node.Node) error { } // TODO check if node exists? 
-func (t *Tree) createNode(n *node.Node) (err error) { +func (t *Tree) createDirNode(n *node.Node) (err error) { // create a directory node nodePath := n.InternalPath() - if err = os.MkdirAll(nodePath, 0700); err != nil { + if err := os.MkdirAll(nodePath, 0700); err != nil { return errors.Wrap(err, "Decomposedfs: error creating node") } + if xattrs.UsesExternalMetadataFile() { + _, err = os.Create(xattrs.MetadataPath(nodePath)) + if err != nil { + return errors.Wrap(err, "Decomposedfs: error creating node") + } + } return n.WriteAllNodeMetadata() } -// readTrashLink returns nodeID and timestamp -func readTrashLink(path string) (string, string, error) { - link, err := os.Readlink(path) - if err != nil { - return "", "", err - } - // ../../../../../nodes/e5/6c/75/a8/-d235-4cbb-8b4e-48b6fd0f2094.T.2022-02-16T14:38:11.769917408Z - // TODO use filepath.Separator to support windows - link = strings.ReplaceAll(link, "/", "") - // ..........nodese56c75a8-d235-4cbb-8b4e-48b6fd0f2094.T.2022-02-16T14:38:11.769917408Z - if link[0:15] != "..........nodes" || link[51:54] != ".T." { - return "", "", errtypes.InternalError("malformed trash link") - } - return link[15:51], link[54:], nil -} - -// readTrashChildLink returns nodeID -func readTrashChildLink(path string) (string, error) { - link, err := os.Readlink(path) - if err != nil { - return "", err - } - // ../../../../../e5/6c/75/a8/-d235-4cbb-8b4e-48b6fd0f2094 - // TODO use filepath.Separator to support windows - link = strings.ReplaceAll(link, "/", "") - // ..........e56c75a8-d235-4cbb-8b4e-48b6fd0f2094 - if link[0:10] != ".........." { - return "", errtypes.InternalError("malformed trash child link") - } - return link[10:], nil -} +var nodeIDRegep = regexp.MustCompile(`.*/nodes/([^.]*).*`) // TODO refactor the returned params into Node properties? would make all the path transformations go away... 
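// Worked example for the extraction performed in readRecycleItem below (the
// concrete path is hypothetical, reusing the layout documented for the removed
// readTrashLink helper): nodeIDRegep keeps everything between "/nodes/" and the
// first dot, and stripping the remaining path separators yields the node id.
//
//   resolved path: .../nodes/e5/6c/75/a8/-d235-4cbb-8b4e-48b6fd0f2094.T.2022-02-16T14:38:11.769917408Z
//   capture group: e5/6c/75/a8/-d235-4cbb-8b4e-48b6fd0f2094
//   node id:       e56c75a8-d235-4cbb-8b4e-48b6fd0f2094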
func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) (recycleNode *node.Node, trashItem string, deletedNodePath string, origin string, err error) { @@ -984,54 +981,49 @@ func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) ( return nil, "", "", "", errtypes.InternalError("key is empty") } - var nodeID, timeSuffix string + var nodeID string - trashItem = filepath.Join(t.lookup.InternalRoot(), "spaces", lookup.Pathify(spaceID, 1, 2), "trash", lookup.Pathify(key, 4, 2), path) - if path == "" || path == "/" { - nodeID, timeSuffix, err = readTrashLink(trashItem) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("trashItem", trashItem).Msg("error reading trash link") - return - } - deletedNodePath = t.lookup.InternalPath(spaceID, nodeID) + node.TrashIDDelimiter + timeSuffix - } else { - // children of a trashed node are in the nodes folder - nodeID, err = readTrashChildLink(trashItem) - if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("trashItem", trashItem).Msg("error reading trash child link") - return - } - deletedNodePath = t.lookup.InternalPath(spaceID, nodeID) + trashItem = filepath.Join(t.lookup.InternalRoot(), "spaces", lookup.Pathify(spaceID, 1, 2), "trash", lookup.Pathify(key, 4, 2)) + resolvedTrashItem, err := filepath.EvalSymlinks(trashItem) + if err != nil { + return + } + deletedNodePath, err = filepath.EvalSymlinks(filepath.Join(resolvedTrashItem, path)) + if err != nil { + return } + nodeID = nodeIDRegep.ReplaceAllString(deletedNodePath, "$1") + nodeID = strings.ReplaceAll(nodeID, "/", "") - recycleNode = node.New(spaceID, nodeID, "", "", 0, "", nil, t.lookup) + recycleNode = node.New(spaceID, nodeID, "", "", 0, "", provider.ResourceType_RESOURCE_TYPE_INVALID, nil, t.lookup) recycleNode.SpaceRoot, err = node.ReadNode(ctx, t.lookup, spaceID, spaceID, false) if err != nil { return } + recycleNode.SetType(node.TypeFromPath(recycleNode.InternalPath())) var attrStr string // lookup blobID in extended attributes - if attrStr, err = xattrs.Get(deletedNodePath, xattrs.BlobIDAttr); err == nil { + if attrStr, err = xattrs.Get(deletedNodePath, prefixes.BlobIDAttr); err == nil { recycleNode.BlobID = attrStr } else { return } // lookup blobSize in extended attributes - if recycleNode.Blobsize, err = xattrs.GetInt64(deletedNodePath, xattrs.BlobsizeAttr); err != nil { + if recycleNode.Blobsize, err = xattrs.GetInt64(deletedNodePath, prefixes.BlobsizeAttr); err != nil { return } // lookup parent id in extended attributes - if attrStr, err = xattrs.Get(deletedNodePath, xattrs.ParentidAttr); err == nil { + if attrStr, err = xattrs.Get(deletedNodePath, prefixes.ParentidAttr); err == nil { recycleNode.ParentID = attrStr } else { return } // lookup name in extended attributes - if attrStr, err = xattrs.Get(deletedNodePath, xattrs.NameAttr); err == nil { + if attrStr, err = xattrs.Get(deletedNodePath, prefixes.NameAttr); err == nil { recycleNode.Name = attrStr } else { return @@ -1040,9 +1032,8 @@ func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) ( // get origin node, is relative to space root origin = "/" - trashRootItemPath := filepath.Join(t.lookup.InternalRoot(), "spaces", lookup.Pathify(spaceID, 1, 2), "trash", lookup.Pathify(key, 4, 2)) // lookup origin path in extended attributes - if attrStr, err = xattrs.Get(trashRootItemPath, xattrs.TrashOriginAttr); err == nil { + if attrStr, err = xattrs.Get(resolvedTrashItem, prefixes.TrashOriginAttr); err == nil { origin = filepath.Join(attrStr, 
path) } else { log.Error().Err(err).Str("trashItem", trashItem).Str("deletedNodePath", deletedNodePath).Msg("could not read origin path, restoring to /") @@ -1050,29 +1041,3 @@ func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) ( return } - -// appendChildren appends `n` and all its children to `nodes` -func appendChildren(ctx context.Context, n *node.Node, nodes []*node.Node) ([]*node.Node, error) { - nodes = append(nodes, n) - - children, err := os.ReadDir(n.InternalPath()) - if err != nil { - // TODO: How to differentiate folders from files? - return nodes, nil - } - - for _, c := range children { - cn, err := n.Child(ctx, c.Name()) - if err != nil { - // continue? - return nil, err - } - nodes, err = appendChildren(ctx, cn, nodes) - if err != nil { - // continue? - return nil, err - } - } - - return nodes, nil -} diff --git a/pkg/storage/utils/decomposedfs/tree/tree_test.go b/pkg/storage/utils/decomposedfs/tree/tree_test.go index 69200e7e3a..bb7370abca 100644 --- a/pkg/storage/utils/decomposedfs/tree/tree_test.go +++ b/pkg/storage/utils/decomposedfs/tree/tree_test.go @@ -21,6 +21,7 @@ package tree_test import ( "os" "path" + "path/filepath" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup" @@ -28,8 +29,8 @@ import ( helpers "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/testhelpers" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/tree" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/prefixes" "github.com/google/uuid" - "github.com/pkg/xattr" "github.com/stretchr/testify/mock" . "github.com/onsi/ginkgo/v2" @@ -119,9 +120,12 @@ var _ = Describe("Tree", func() { It("sets the trash origin xattr", func() { trashPath := path.Join(env.Root, "spaces", lookup.Pathify(n.SpaceRoot.ID, 1, 2), "trash", lookup.Pathify(n.ID, 4, 2)) - attr, err := xattr.Get(trashPath, xattrs.TrashOriginAttr) + resolveTrashPath, err := filepath.EvalSymlinks(trashPath) Expect(err).ToNot(HaveOccurred()) - Expect(string(attr)).To(Equal("/dir1/file1")) + + attr, err := xattrs.Get(resolveTrashPath, prefixes.TrashOriginAttr) + Expect(err).ToNot(HaveOccurred()) + Expect(attr).To(Equal("/dir1/file1")) }) It("does not delete the blob from the blobstore", func() { @@ -398,7 +402,7 @@ var _ = Describe("Tree", func() { stopdir, err := env.CreateTestDir("testdir/stophere", &provider.Reference{ResourceId: env.SpaceRootRes}) Expect(err).ToNot(HaveOccurred()) - err = xattr.Set(stopdir.InternalPath(), xattrs.PropagationAttr, []byte("0")) + err = xattrs.Set(stopdir.InternalPath(), prefixes.PropagationAttr, "0") Expect(err).ToNot(HaveOccurred()) otherdir, err := env.CreateTestDir("testdir/stophere/lotsofbytes", &provider.Reference{ResourceId: env.SpaceRootRes}) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/storage/utils/decomposedfs/upload/processing.go b/pkg/storage/utils/decomposedfs/upload/processing.go index 7b753ae7f5..46a4e3f934 100644 --- a/pkg/storage/utils/decomposedfs/upload/processing.go +++ b/pkg/storage/utils/decomposedfs/upload/processing.go @@ -41,6 +41,7 @@ import ( "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/prefixes" "github.com/cs3org/reva/v2/pkg/storage/utils/filelocks" 
"github.com/cs3org/reva/v2/pkg/storagespace" "github.com/cs3org/reva/v2/pkg/utils" @@ -252,6 +253,7 @@ func CreateNodeForUpload(upload *Upload, initAttrs map[string]string) (*node.Nod upload.Info.Storage["NodeName"], fsize, upload.Info.ID, + provider.ResourceType_RESOURCE_TYPE_FILE, nil, upload.lu, ) @@ -279,11 +281,12 @@ func CreateNodeForUpload(upload *Upload, initAttrs map[string]string) (*node.Nod } // overwrite technical information - initAttrs[xattrs.ParentidAttr] = n.ParentID - initAttrs[xattrs.NameAttr] = n.Name - initAttrs[xattrs.BlobIDAttr] = n.BlobID - initAttrs[xattrs.BlobsizeAttr] = strconv.FormatInt(n.Blobsize, 10) - initAttrs[xattrs.StatusPrefix] = node.ProcessingStatus + upload.Info.ID + initAttrs[prefixes.TypeAttr] = strconv.FormatInt(int64(n.Type()), 10) + initAttrs[prefixes.ParentidAttr] = n.ParentID + initAttrs[prefixes.NameAttr] = n.Name + initAttrs[prefixes.BlobIDAttr] = n.BlobID + initAttrs[prefixes.BlobsizeAttr] = strconv.FormatInt(n.Blobsize, 10) + initAttrs[prefixes.StatusPrefix] = node.ProcessingStatus + upload.Info.ID // update node metadata with new blobid etc err = n.SetXattrsWithLock(initAttrs, lock) @@ -299,11 +302,11 @@ func CreateNodeForUpload(upload *Upload, initAttrs map[string]string) (*node.Nod } // add etag to metadata - nfi, err := os.Stat(n.InternalPath()) + tmtime, err := n.GetTMTime() if err != nil { return nil, err } - upload.Info.MetaData["etag"], _ = node.CalculateEtag(n.ID, nfi.ModTime()) + upload.Info.MetaData["etag"], _ = node.CalculateEtag(n.ID, tmtime) // update nodeid for later upload.Info.Storage["NodeId"] = n.ID @@ -325,6 +328,11 @@ func initNewNode(upload *Upload, n *node.Node, fsize uint64) (*flock.Flock, erro if _, err := os.Create(n.InternalPath()); err != nil { return nil, err } + if xattrs.UsesExternalMetadataFile() { + if _, err := os.Create(xattrs.MetadataPath(n.InternalPath())); err != nil { + return nil, err + } + } lock, err := filelocks.AcquireWriteLock(n.InternalPath()) if err != nil { @@ -337,7 +345,7 @@ func initNewNode(upload *Upload, n *node.Node, fsize uint64) (*flock.Flock, erro } // link child name to parent if it is new - childNameLink := filepath.Join(n.ParentInternalPath(), n.Name) + childNameLink := filepath.Join(n.ParentPath(), n.Name) link, err := os.Readlink(childNameLink) if err == nil && link != "../"+n.ID { if err := os.Remove(childNameLink); err != nil { @@ -363,7 +371,7 @@ func updateExistingNode(upload *Upload, n *node.Node, spaceID string, fsize uint return nil, err } - vfi, err := os.Stat(old.InternalPath()) + tmtime, err := old.GetTMTime() if err != nil { return nil, err } @@ -371,7 +379,7 @@ func updateExistingNode(upload *Upload, n *node.Node, spaceID string, fsize uint // When the if-match header was set we need to check if the // etag still matches before finishing the upload. 
if ifMatch, ok := upload.Info.MetaData["if-match"]; ok { - targetEtag, err := node.CalculateEtag(n.ID, vfi.ModTime()) + targetEtag, err := node.CalculateEtag(n.ID, tmtime) switch { case err != nil: return nil, errtypes.InternalError(err.Error()) @@ -380,7 +388,7 @@ func updateExistingNode(upload *Upload, n *node.Node, spaceID string, fsize uint } } - upload.versionsPath = upload.lu.InternalPath(spaceID, n.ID+node.RevisionIDDelimiter+vfi.ModTime().UTC().Format(time.RFC3339Nano)) + upload.versionsPath = upload.lu.InternalPath(spaceID, n.ID+node.RevisionIDDelimiter+tmtime.UTC().Format(time.RFC3339Nano)) upload.sizeDiff = int64(fsize) - old.Blobsize upload.Info.MetaData["versionsPath"] = upload.versionsPath upload.Info.MetaData["sizeDiff"] = strconv.Itoa(int(upload.sizeDiff)) @@ -397,18 +405,24 @@ func updateExistingNode(upload *Upload, n *node.Node, spaceID string, fsize uint if _, err := os.Create(upload.versionsPath); err != nil { return lock, err } + if xattrs.UsesExternalMetadataFile() { + if _, err := os.Create(xattrs.MetadataPath(upload.versionsPath)); err != nil { + return lock, err + } + } // copy blob metadata to version node if err := xattrs.CopyMetadataWithSourceLock(targetPath, upload.versionsPath, func(attributeName string) bool { - return strings.HasPrefix(attributeName, xattrs.ChecksumPrefix) || - attributeName == xattrs.BlobIDAttr || - attributeName == xattrs.BlobsizeAttr + return strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || + attributeName == prefixes.TypeAttr || + attributeName == prefixes.BlobIDAttr || + attributeName == prefixes.BlobsizeAttr }, lock); err != nil { return lock, err } // keep mtime from previous version - if err := os.Chtimes(upload.versionsPath, vfi.ModTime(), vfi.ModTime()); err != nil { + if err := os.Chtimes(upload.versionsPath, tmtime, tmtime); err != nil { return lock, errtypes.InternalError(fmt.Sprintf("failed to change mtime of version node: %s", err)) } diff --git a/pkg/storage/utils/decomposedfs/upload/upload.go b/pkg/storage/utils/decomposedfs/upload/upload.go index 3d689c064c..2293b9e2bc 100644 --- a/pkg/storage/utils/decomposedfs/upload/upload.go +++ b/pkg/storage/utils/decomposedfs/upload/upload.go @@ -43,6 +43,7 @@ import ( "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/prefixes" "github.com/cs3org/reva/v2/pkg/utils" "github.com/golang-jwt/jwt" "github.com/pkg/errors" @@ -230,9 +231,9 @@ func (upload *Upload) FinishUpload(_ context.Context) error { // update checksums attrs := map[string]string{ - xattrs.ChecksumPrefix + "sha1": string(sha1h.Sum(nil)), - xattrs.ChecksumPrefix + "md5": string(md5h.Sum(nil)), - xattrs.ChecksumPrefix + "adler32": string(adler32h.Sum(nil)), + prefixes.ChecksumPrefix + "sha1": string(sha1h.Sum(nil)), + prefixes.ChecksumPrefix + "md5": string(md5h.Sum(nil)), + prefixes.ChecksumPrefix + "adler32": string(adler32h.Sum(nil)), } n, err := CreateNodeForUpload(upload, attrs) @@ -366,9 +367,9 @@ func (upload *Upload) cleanup(cleanNode, cleanBin, cleanInfo bool) { } // no old version was present - remove child entry - src := filepath.Join(upload.Node.ParentInternalPath(), upload.Node.Name) + src := filepath.Join(upload.Node.ParentPath(), upload.Node.Name) if err := os.Remove(src); err != nil { - upload.log.Info().Str("path", upload.Node.ParentInternalPath()).Err(err).Msg("removing node from 
parent failed") + upload.log.Info().Str("path", upload.Node.ParentPath()).Err(err).Msg("removing node from parent failed") } // remove node from upload as it no longer exists @@ -376,9 +377,10 @@ func (upload *Upload) cleanup(cleanNode, cleanBin, cleanInfo bool) { default: if err := xattrs.CopyMetadata(upload.Node.InternalPath(), p, func(attributeName string) bool { - return strings.HasPrefix(attributeName, xattrs.ChecksumPrefix) || - attributeName == xattrs.BlobIDAttr || - attributeName == xattrs.BlobsizeAttr + return strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || + attributeName == prefixes.TypeAttr || + attributeName == prefixes.BlobIDAttr || + attributeName == prefixes.BlobsizeAttr }); err != nil { upload.log.Info().Str("versionpath", p).Str("nodepath", upload.Node.InternalPath()).Err(err).Msg("renaming version node failed") } diff --git a/pkg/storage/utils/decomposedfs/upload_test.go b/pkg/storage/utils/decomposedfs/upload_test.go index 4dfc4abf53..1c74394eab 100644 --- a/pkg/storage/utils/decomposedfs/upload_test.go +++ b/pkg/storage/utils/decomposedfs/upload_test.go @@ -39,9 +39,9 @@ import ( "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/tree" treemocks "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/tree/mocks" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/prefixes" "github.com/cs3org/reva/v2/pkg/storagespace" "github.com/cs3org/reva/v2/tests/helpers" - "github.com/pkg/xattr" "github.com/stretchr/testify/mock" . "github.com/onsi/ginkgo/v2" @@ -173,7 +173,7 @@ var _ = Describe("File uploads", func() { // the space name attribute is the stop condition in the lookup h, err := lu.NodeFromResource(ctx, rootRef) Expect(err).ToNot(HaveOccurred()) - err = xattr.Set(h.InternalPath(), xattrs.SpaceNameAttr, []byte("username")) + err = xattrs.Set(h.InternalPath(), prefixes.SpaceNameAttr, "username") Expect(err).ToNot(HaveOccurred()) permissions.On("AssemblePermissions", mock.Anything, mock.Anything, mock.Anything).Return(provider.ResourcePermissions{ Stat: true, diff --git a/pkg/storage/utils/decomposedfs/xattrs/backend.go b/pkg/storage/utils/decomposedfs/xattrs/backend.go new file mode 100644 index 0000000000..a34760fbaf --- /dev/null +++ b/pkg/storage/utils/decomposedfs/xattrs/backend.go @@ -0,0 +1,36 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package xattrs + +import ( + xattrBackend "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/backend" +) + +// TODO This is currently a singleton because the trash code needs to be refactored before we can hide this behind a real metadata backend interface +var backend xattrBackend.Backend = xattrBackend.XattrsBackend{} + +// UseXattrsBackend configures decomposedfs to use xattrs for storing file attributes +func UseXattrsBackend() { + backend = xattrBackend.XattrsBackend{} +} + +// UseIniBackend configures decomposedfs to use ini files for storing file attributes +func UseIniBackend() { + backend = xattrBackend.NewIniBackend() +} diff --git a/pkg/storage/utils/decomposedfs/xattrs/backend/backend.go b/pkg/storage/utils/decomposedfs/xattrs/backend/backend.go new file mode 100644 index 0000000000..3eed86e614 --- /dev/null +++ b/pkg/storage/utils/decomposedfs/xattrs/backend/backend.go @@ -0,0 +1,77 @@ +// Copyright 2018-2023 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package backend + +import ( + "github.com/pkg/errors" +) + +var errUnconfiguredError = errors.New("no xattrs backend configured. Bailing out") + +// Backend defines the interface for file attribute backends +type Backend interface { + All(path string) (map[string]string, error) + Get(path, key string) (string, error) + GetInt64(path, key string) (int64, error) + List(path string) (attribs []string, err error) + Set(path, key, val string) error + SetMultiple(path string, attribs map[string]string) error + Remove(path, key string) error + + IsMetaFile(path string) bool + // UsesExternalMetadataFile returns true when the backend uses external metadata files + UsesExternalMetadataFile() bool + MetadataPath(path string) string +} + +// NullBackend is the default stub backend, used to enforce the configuration of a proper backend +type NullBackend struct{} + +// All reads all extended attributes for a node +func (NullBackend) All(path string) (map[string]string, error) { return nil, errUnconfiguredError } + +// Get an extended attribute value for the given key +func (NullBackend) Get(path, key string) (string, error) { return "", errUnconfiguredError } + +// GetInt64 reads a string as int64 from the xattrs +func (NullBackend) GetInt64(path, key string) (int64, error) { return 0, errUnconfiguredError } + +// List retrieves a list of names of extended attributes associated with the +// given path in the file system. 
+func (NullBackend) List(path string) ([]string, error) { return nil, errUnconfiguredError } + +// Set sets one attribute for the given path +func (NullBackend) Set(path string, key string, val string) error { return errUnconfiguredError } + +// SetMultiple sets a set of attribute for the given path +func (NullBackend) SetMultiple(path string, attribs map[string]string) error { + return errUnconfiguredError +} + +// Remove removes an extended attribute key +func (NullBackend) Remove(path string, key string) error { return errUnconfiguredError } + +// IsMetaFile returns whether the given path represents a meta file +func (NullBackend) IsMetaFile(path string) bool { return false } + +// UsesExternalMetadataFile returns true when the backend uses external metadata files +func (NullBackend) UsesExternalMetadataFile() bool { return false } + +// MetadataPath returns the path of the file holding the metadata for the given path +func (NullBackend) MetadataPath(path string) string { return "" } diff --git a/pkg/storage/utils/decomposedfs/xattrs/backend/backend_test.go b/pkg/storage/utils/decomposedfs/xattrs/backend/backend_test.go new file mode 100644 index 0000000000..dfa6ccf3a7 --- /dev/null +++ b/pkg/storage/utils/decomposedfs/xattrs/backend/backend_test.go @@ -0,0 +1,195 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package backend_test + +import ( + "os" + "path" + "strings" + + xattrsBackend "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/backend" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Backend", func() { + var ( + tmpdir string + file string + metafile string + + backend xattrsBackend.Backend + ) + + BeforeEach(func() { + var err error + tmpdir, err = os.MkdirTemp(os.TempDir(), "XattrsBackendTest-") + Expect(err).ToNot(HaveOccurred()) + }) + + JustBeforeEach(func() { + file = path.Join(tmpdir, "file") + metafile = backend.MetadataPath(file) + _, err := os.Create(metafile) + Expect(err).ToNot(HaveOccurred()) + }) + + AfterEach(func() { + if tmpdir != "" { + os.RemoveAll(tmpdir) + } + }) + + Describe("IniBackend", func() { + BeforeEach(func() { + backend = xattrsBackend.IniBackend{} + }) + + Describe("Set", func() { + It("sets an attribute", func() { + err := backend.Set(file, "foo", "bar") + Expect(err).ToNot(HaveOccurred()) + + content, err := os.ReadFile(metafile) + Expect(err).ToNot(HaveOccurred()) + Expect(string(content)).To(Equal("foo = bar\n")) + }) + + It("updates an attribute", func() { + err := backend.Set(file, "foo", "bar") + Expect(err).ToNot(HaveOccurred()) + err = backend.Set(file, "foo", "baz") + Expect(err).ToNot(HaveOccurred()) + + content, err := os.ReadFile(metafile) + Expect(err).ToNot(HaveOccurred()) + Expect(string(content)).To(Equal("foo = baz\n")) + }) + }) + + Describe("SetMultiple", func() { + It("sets attributes", func() { + err := backend.SetMultiple(file, map[string]string{"foo": "bar", "baz": "qux"}) + Expect(err).ToNot(HaveOccurred()) + + content, err := os.ReadFile(metafile) + Expect(err).ToNot(HaveOccurred()) + lines := strings.Split(strings.Trim(string(content), "\n"), "\n") + Expect(lines).To(ConsistOf("foo = bar", "baz = qux")) + }) + + It("updates an attribute", func() { + err := backend.Set(file, "foo", "bar") + Expect(err).ToNot(HaveOccurred()) + err = backend.SetMultiple(file, map[string]string{"foo": "bar", "baz": "qux"}) + Expect(err).ToNot(HaveOccurred()) + + content, err := os.ReadFile(metafile) + Expect(err).ToNot(HaveOccurred()) + lines := strings.Split(strings.Trim(string(content), "\n"), "\n") + Expect(lines).To(ConsistOf("foo = bar", "baz = qux")) + }) + }) + + Describe("All", func() { + It("returns the entries", func() { + err := os.WriteFile(metafile, []byte("foo=123\nbar=baz"), 0600) + Expect(err).ToNot(HaveOccurred()) + + v, err := backend.All(file) + Expect(err).ToNot(HaveOccurred()) + Expect(len(v)).To(Equal(2)) + Expect(v["foo"]).To(Equal("123")) + Expect(v["bar"]).To(Equal("baz")) + }) + + It("returns an empty map", func() { + v, err := backend.All(file) + Expect(err).ToNot(HaveOccurred()) + Expect(v).To(Equal(map[string]string{})) + }) + }) + + Describe("List", func() { + It("returns the entries", func() { + err := os.WriteFile(metafile, []byte("foo = 123\nbar = baz"), 0600) + Expect(err).ToNot(HaveOccurred()) + + v, err := backend.List(file) + Expect(err).ToNot(HaveOccurred()) + Expect(v).To(ConsistOf("foo", "bar")) + }) + + It("returns an empty list", func() { + v, err := backend.List(file) + Expect(err).ToNot(HaveOccurred()) + Expect(v).To(Equal([]string{})) + }) + }) + + Describe("Get", func() { + It("returns the attribute", func() { + err := os.WriteFile(metafile, []byte("foo = \"bar\"\n"), 0600) + Expect(err).ToNot(HaveOccurred()) + + v, err := backend.Get(file, "foo") + Expect(err).ToNot(HaveOccurred()) + Expect(v).To(Equal("bar")) + }) + + It("returns an error on unknown attributes", func() { + _, err := backend.Get(file, "foo") + Expect(err).To(HaveOccurred()) + }) + }) + + Describe("GetInt64", func() { + It("returns the attribute", func() { + err := 
os.WriteFile(metafile, []byte("foo=123\n"), 0600) + Expect(err).ToNot(HaveOccurred()) + + v, err := backend.GetInt64(file, "foo") + Expect(err).ToNot(HaveOccurred()) + Expect(v).To(Equal(int64(123))) + }) + + It("returns an error on unknown attributes", func() { + _, err := backend.GetInt64(file, "foo") + Expect(err).To(HaveOccurred()) + }) + }) + + Describe("Get", func() { + It("deletes an attribute", func() { + err := os.WriteFile(metafile, []byte("foo=bar\n"), 0600) + Expect(err).ToNot(HaveOccurred()) + + v, err := backend.Get(file, "foo") + Expect(err).ToNot(HaveOccurred()) + Expect(v).To(Equal("bar")) + + err = backend.Remove(file, "foo") + Expect(err).ToNot(HaveOccurred()) + + _, err = backend.Get(file, "foo") + Expect(err).To(HaveOccurred()) + }) + }) + }) +}) diff --git a/pkg/storage/utils/decomposedfs/xattrs/backend/ini.go b/pkg/storage/utils/decomposedfs/xattrs/backend/ini.go new file mode 100644 index 0000000000..9af4868190 --- /dev/null +++ b/pkg/storage/utils/decomposedfs/xattrs/backend/ini.go @@ -0,0 +1,250 @@ +// Copyright 2018-2023 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
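Before the implementation, a short usage sketch of the ini backend exercised by the tests above (the node path and attribute value are invented): attributes live in a sidecar file named "<path>.ini", which has to exist before the first write, and values under the checksum, metadata and grant prefixes are additionally base64-encoded because they may contain binary data.

package main

import (
	"fmt"
	"os"

	xattrsBackend "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/backend"
	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/prefixes"
)

func main() {
	b := xattrsBackend.NewIniBackend()

	node := "/tmp/example-node"
	// The sidecar must exist before attributes can be written, mirroring the
	// os.Create(xattrs.MetadataPath(...)) calls added to tree.go and processing.go.
	if _, err := os.Create(b.MetadataPath(node)); err != nil { // creates "/tmp/example-node.ini"
		panic(err)
	}

	// Stored in the sidecar as a plain key/value line: user.ocis.name = report.txt
	if err := b.Set(node, prefixes.NameAttr, "report.txt"); err != nil {
		panic(err)
	}

	name, _ := b.Get(node, prefixes.NameAttr)
	fmt.Println(name) // report.txt
}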
+ +package backend + +import ( + "encoding/base64" + "os" + "strings" + "time" + + "github.com/bluele/gcache" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/prefixes" + "github.com/pkg/xattr" + "github.com/rogpeppe/go-internal/lockedfile" + "gopkg.in/ini.v1" +) + +// IniBackend persists the attributes in INI format inside the file +type IniBackend struct { + metaCache gcache.Cache +} + +type cacheEntry struct { + mtime time.Time + meta *ini.File +} + +var encodedPrefixes = []string{prefixes.ChecksumPrefix, prefixes.MetadataPrefix, prefixes.GrantPrefix} + +// NewIniBackend returns a new IniBackend instance +func NewIniBackend() IniBackend { + return IniBackend{ + metaCache: gcache.New(1_000_000).LFU().Build(), + } +} + +// All reads all extended attributes for a node +func (b IniBackend) All(path string) (map[string]string, error) { + path = b.MetadataPath(path) + + ini, err := b.loadIni(path) + if err != nil { + return nil, err + } + attribs := ini.Section("").KeysHash() + for key, val := range attribs { + for _, prefix := range encodedPrefixes { + if strings.HasPrefix(key, prefix) { + valBytes, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return nil, err + } + attribs[key] = string(valBytes) + break + } + } + } + + return attribs, nil +} + +// Get an extended attribute value for the given key +func (b IniBackend) Get(path, key string) (string, error) { + path = b.MetadataPath(path) + + ini, err := b.loadIni(path) + if err != nil { + return "", err + } + if !ini.Section("").HasKey(key) { + return "", &xattr.Error{Op: "ini.get", Path: path, Name: key, Err: xattr.ENOATTR} + } + + val := ini.Section("").Key(key).Value() + for _, prefix := range encodedPrefixes { + if strings.HasPrefix(key, prefix) { + valBytes, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return "", err + } + return string(valBytes), nil + } + } + + return val, nil +} + +// GetInt64 reads a string as int64 from the xattrs +func (b IniBackend) GetInt64(path, key string) (int64, error) { + path = b.MetadataPath(path) + + ini, err := b.loadIni(path) + if err != nil { + return 0, err + } + if !ini.Section("").HasKey(key) { + return 0, &xattr.Error{Op: "ini.get", Path: path, Name: key, Err: xattr.ENOATTR} + } + return ini.Section("").Key(key).MustInt64(), nil +} + +// List retrieves a list of names of extended attributes associated with the +// given path in the file system. 
+func (b IniBackend) List(path string) ([]string, error) { + path = b.MetadataPath(path) + + ini, err := b.loadIni(path) + if err != nil { + return nil, err + } + return ini.Section("").KeyStrings(), nil +} + +// Set sets one attribute for the given path +func (b IniBackend) Set(path, key, val string) error { + path = b.MetadataPath(path) + + ini, err := b.loadIni(path) + if err != nil { + return err + } + + for _, prefix := range encodedPrefixes { + if strings.HasPrefix(key, prefix) { + val = base64.StdEncoding.EncodeToString([]byte(val)) + break + } + } + + ini.Section("").Key(key).SetValue(val) + + return b.saveIni(path, ini) +} + +// SetMultiple sets a set of attribute for the given path +func (b IniBackend) SetMultiple(path string, attribs map[string]string) error { + path = b.MetadataPath(path) + + ini, err := b.loadIni(path) + if err != nil { + return err + } + + for key, val := range attribs { + for _, prefix := range encodedPrefixes { + if strings.HasPrefix(key, prefix) { + val = base64.StdEncoding.EncodeToString([]byte(val)) + break + } + } + ini.Section("").Key(key).SetValue(val) + } + + return b.saveIni(path, ini) +} + +// Remove an extended attribute key +func (b IniBackend) Remove(path, key string) error { + path = b.MetadataPath(path) + + ini, err := b.loadIni(path) + if err != nil { + return err + } + + ini.Section("").DeleteKey(key) + + return b.saveIni(path, ini) +} + +func (b IniBackend) saveIni(path string, ini *ini.File) error { + lockedFile, err := lockedfile.OpenFile(path, os.O_TRUNC|os.O_WRONLY, 0600) + if err != nil { + return err + } + defer lockedFile.Close() + + _, err = ini.WriteTo(lockedFile) + if err != nil { + return err + } + fi, err := os.Stat(path) + if err != nil { + return err + } + + return b.metaCache.Set(path, cacheEntry{ + mtime: fi.ModTime(), + meta: ini, + }) +} + +func (b IniBackend) loadIni(path string) (*ini.File, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + if cachedIf, err := b.metaCache.Get(path); err == nil { + cached, ok := cachedIf.(cacheEntry) + if ok && cached.mtime == fi.ModTime() { + return cached.meta, nil + } + } + + lockedFile, err := lockedfile.Open(path) + if err != nil { + return nil, err + } + defer lockedFile.Close() + + iniFile, err := ini.Load(lockedFile) + if err != nil { + return nil, err + } + + err = b.metaCache.Set(path, cacheEntry{ + mtime: fi.ModTime(), + meta: iniFile, + }) + if err != nil { + return nil, err + } + + return iniFile, nil +} + +// IsMetaFile returns whether the given path represents a meta file +func (IniBackend) IsMetaFile(path string) bool { return strings.HasSuffix(path, ".ini") } + +// UsesExternalMetadataFile returns true when the backend uses external metadata files +func (IniBackend) UsesExternalMetadataFile() bool { return true } + +// MetadataPath returns the path of the file holding the metadata for the given path +func (IniBackend) MetadataPath(path string) string { return path + ".ini" } diff --git a/pkg/storage/utils/decomposedfs/xattrs/backend/xattrs.go b/pkg/storage/utils/decomposedfs/xattrs/backend/xattrs.go new file mode 100644 index 0000000000..db69a1a7a3 --- /dev/null +++ b/pkg/storage/utils/decomposedfs/xattrs/backend/xattrs.go @@ -0,0 +1,167 @@ +// Copyright 2018-2023 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package backend + +import ( + "strconv" + + "github.com/cs3org/reva/v2/pkg/storage/utils/filelocks" + "github.com/pkg/errors" + "github.com/pkg/xattr" +) + +// XattrsBackend stores the file attributes in extended attributes +type XattrsBackend struct{} + +// Get an extended attribute value for the given key +// No file locking is involved here as reading a single xattr is +// considered to be atomic. +func (b XattrsBackend) Get(filePath, key string) (string, error) { + v, err := xattr.Get(filePath, key) + if err != nil { + return "", err + } + val := string(v) + return val, nil +} + +// GetInt64 reads a string as int64 from the xattrs +func (b XattrsBackend) GetInt64(filePath, key string) (int64, error) { + attr, err := b.Get(filePath, key) + if err != nil { + return 0, err + } + v, err := strconv.ParseInt(attr, 10, 64) + if err != nil { + return 0, err + } + return v, nil +} + +// List retrieves a list of names of extended attributes associated with the +// given path in the file system. +func (XattrsBackend) List(filePath string) (attribs []string, err error) { + attrs, err := xattr.List(filePath) + if err == nil { + return attrs, nil + } + + // listing the attributes failed. lock the file and try again + readLock, err := filelocks.AcquireReadLock(filePath) + + if err != nil { + return nil, errors.Wrap(err, "xattrs: Unable to lock file for read") + } + defer func() { + rerr := filelocks.ReleaseLock(readLock) + + // if err is non nil we do not overwrite that + if err == nil { + err = rerr + } + }() + + return xattr.List(filePath) +} + +// All reads all extended attributes for a node, protected by a +// shared file lock +func (b XattrsBackend) All(filePath string) (attribs map[string]string, err error) { + attrNames, err := b.List(filePath) + + if err != nil { + return nil, err + } + + var ( + xerrs = 0 + xerr error + ) + // error handling: Count if there are errors while reading all attribs. + // if there were any, return an error. + attribs = make(map[string]string, len(attrNames)) + for _, name := range attrNames { + var val []byte + if val, xerr = xattr.Get(filePath, name); xerr != nil { + xerrs++ + } else { + attribs[name] = string(val) + } + } + + if xerrs > 0 { + err = errors.Wrap(xerr, "Failed to read all xattrs") + } + + return attribs, err +} + +// Set sets one attribute for the given path +func (XattrsBackend) Set(path string, key string, val string) (err error) { + return xattr.Set(path, key, []byte(val)) +} + +// SetMultiple sets a set of attribute for the given path +func (XattrsBackend) SetMultiple(path string, attribs map[string]string) (err error) { + // error handling: Count if there are errors while setting the attribs. + // if there were any, return an error. 
+ var ( + xerrs = 0 + xerr error + ) + for key, val := range attribs { + if xerr = xattr.Set(path, key, []byte(val)); xerr != nil { + // log + xerrs++ + } + } + if xerrs > 0 { + return errors.Wrap(xerr, "Failed to set all xattrs") + } + + return nil +} + +// Remove an extended attribute key +func (XattrsBackend) Remove(filePath string, key string) (err error) { + fileLock, err := filelocks.AcquireWriteLock(filePath) + + if err != nil { + return errors.Wrap(err, "xattrs: Can not acquire write log") + } + defer func() { + rerr := filelocks.ReleaseLock(fileLock) + + // if err is non nil we do not overwrite that + if err == nil { + err = rerr + } + }() + + return xattr.Remove(filePath, key) +} + +// IsMetaFile returns whether the given path represents a meta file +func (XattrsBackend) IsMetaFile(path string) bool { return false } + +// UsesExternalMetadataFile returns true when the backend uses external metadata files +func (XattrsBackend) UsesExternalMetadataFile() bool { return false } + +// MetadataPath returns the path of the file holding the metadata for the given path +func (XattrsBackend) MetadataPath(path string) string { return path } diff --git a/pkg/storage/utils/decomposedfs/xattrs/prefixes/ocis_prefix.go b/pkg/storage/utils/decomposedfs/xattrs/prefixes/ocis_prefix.go new file mode 100644 index 0000000000..18e94cf46c --- /dev/null +++ b/pkg/storage/utils/decomposedfs/xattrs/prefixes/ocis_prefix.go @@ -0,0 +1,29 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +//go:build !freebsd + +package prefixes + +// The default namespace for ocis. As non root users can only manipulate +// the user. namespace, which is what is used to store ownCloud specific +// metadata. To prevent name collisions with other apps, we are going to +// introduce a sub namespace "user.ocis." +const ( + OcisPrefix string = "user.ocis." +) diff --git a/pkg/storage/utils/decomposedfs/xattrs/prefixes/ocis_prefix_freebsd.go b/pkg/storage/utils/decomposedfs/xattrs/prefixes/ocis_prefix_freebsd.go new file mode 100644 index 0000000000..c601a0839d --- /dev/null +++ b/pkg/storage/utils/decomposedfs/xattrs/prefixes/ocis_prefix_freebsd.go @@ -0,0 +1,28 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +//go:build freebsd + +package prefixes + +// On FreeBSD the `user` namespace is implied through a separate syscall argument +// and will fail with invalid argument when you try to start an xattr name with user. or system. +// For that reason we drop the superfluous user. prefix for FreeBSD specifically. +const ( + OcisPrefix string = "ocis." +) diff --git a/pkg/storage/utils/decomposedfs/xattrs/prefixes/prefixes.go b/pkg/storage/utils/decomposedfs/xattrs/prefixes/prefixes.go new file mode 100644 index 0000000000..2bab508b7e --- /dev/null +++ b/pkg/storage/utils/decomposedfs/xattrs/prefixes/prefixes.go @@ -0,0 +1,101 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +//go:build !freebsd + +package prefixes + +// Declare a list of xattr keys +// TODO the below comment is currently copied from the owncloud driver, revisit +// Currently,extended file attributes have four separated +// namespaces (user, trusted, security and system) followed by a dot. +// A non root user can only manipulate the user. namespace, which is what +// we will use to store ownCloud specific metadata. To prevent name +// collisions with other apps We are going to introduce a sub namespace +// "user.ocis." in the xattrs_prefix*.go files. +const ( + TypeAttr string = OcisPrefix + "type" + ParentidAttr string = OcisPrefix + "parentid" + OwnerIDAttr string = OcisPrefix + "owner.id" + OwnerIDPAttr string = OcisPrefix + "owner.idp" + OwnerTypeAttr string = OcisPrefix + "owner.type" + // the base name of the node + // updated when the file is renamed or moved + NameAttr string = OcisPrefix + "name" + + BlobIDAttr string = OcisPrefix + "blobid" + BlobsizeAttr string = OcisPrefix + "blobsize" + + // statusPrefix is the prefix for the node status + StatusPrefix string = OcisPrefix + "nodestatus" + + // scanPrefix is the prefix for the virus scan status and date + ScanStatusPrefix string = OcisPrefix + "scanstatus" + ScanDatePrefix string = OcisPrefix + "scandate" + + // grantPrefix is the prefix for sharing related extended attributes + GrantPrefix string = OcisPrefix + "grant." + GrantUserAcePrefix string = OcisPrefix + "grant." + UserAcePrefix + GrantGroupAcePrefix string = OcisPrefix + "grant." + GroupAcePrefix + MetadataPrefix string = OcisPrefix + "md." + + // favorite flag, per user + FavPrefix string = OcisPrefix + "fav." + + // a temporary etag for a folder that is removed when the mtime propagation happens + TmpEtagAttr string = OcisPrefix + "tmp.etag" + ReferenceAttr string = OcisPrefix + "cs3.ref" // arbitrary metadata + ChecksumPrefix string = OcisPrefix + "cs." 
// followed by the algorithm, eg. ocis.cs.sha1 + TrashOriginAttr string = OcisPrefix + "trash.origin" // trash origin + + // we use a single attribute to enable or disable propagation of both: synctime and treesize + // The propagation attribute is set to '1' at the top of the (sub)tree. Propagation will stop at + // that node. + PropagationAttr string = OcisPrefix + "propagation" + + // the tree modification time of the tree below this node, + // propagated when synctime_accounting is true and + // user.ocis.propagation=1 is set + // stored as a readable time.RFC3339Nano + TreeMTimeAttr string = OcisPrefix + "tmtime" + + // the deletion/disabled time of a space or node + // used to mark space roots as disabled + // stored as a readable time.RFC3339Nano + DTimeAttr string = OcisPrefix + "dtime" + + // the size of the tree below this node, + // propagated when treesize_accounting is true and + // user.ocis.propagation=1 is set + // stored as uint64, little endian + TreesizeAttr string = OcisPrefix + "treesize" + + // the quota for the storage space / tree, regardless who accesses it + QuotaAttr string = OcisPrefix + "quota" + + // the name given to a storage space. It should not contain any semantics as its only purpose is to be read. + SpaceNameAttr string = OcisPrefix + "space.name" + SpaceTypeAttr string = OcisPrefix + "space.type" + SpaceDescriptionAttr string = OcisPrefix + "space.description" + SpaceReadmeAttr string = OcisPrefix + "space.readme" + SpaceImageAttr string = OcisPrefix + "space.image" + SpaceAliasAttr string = OcisPrefix + "space.alias" + + UserAcePrefix string = "u:" + GroupAcePrefix string = "g:" +) diff --git a/pkg/storage/utils/decomposedfs/xattrs/xattrs.go b/pkg/storage/utils/decomposedfs/xattrs/xattrs.go index 695a5a3854..3b20531d05 100644 --- a/pkg/storage/utils/decomposedfs/xattrs/xattrs.go +++ b/pkg/storage/utils/decomposedfs/xattrs/xattrs.go @@ -19,93 +19,12 @@ package xattrs import ( - "strconv" "strings" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/v2/pkg/storage/utils/filelocks" "github.com/gofrs/flock" "github.com/pkg/errors" - "github.com/pkg/xattr" -) - -// Declare a list of xattr keys -// TODO the below comment is currently copied from the owncloud driver, revisit -// Currently,extended file attributes have four separated -// namespaces (user, trusted, security and system) followed by a dot. -// A non root user can only manipulate the user. namespace, which is what -// we will use to store ownCloud specific metadata. To prevent name -// collisions with other apps We are going to introduce a sub namespace -// "user.ocis." in the xattrs_prefix*.go files. -const ( - ParentidAttr string = OcisPrefix + "parentid" - OwnerIDAttr string = OcisPrefix + "owner.id" - OwnerIDPAttr string = OcisPrefix + "owner.idp" - OwnerTypeAttr string = OcisPrefix + "owner.type" - // the base name of the node - // updated when the file is renamed or moved - NameAttr string = OcisPrefix + "name" - - BlobIDAttr string = OcisPrefix + "blobid" - BlobsizeAttr string = OcisPrefix + "blobsize" - - // statusPrefix is the prefix for the node status - StatusPrefix string = OcisPrefix + "nodestatus" - - // scanPrefix is the prefix for the virus scan status and date - ScanStatusPrefix string = OcisPrefix + "scanstatus" - ScanDatePrefix string = OcisPrefix + "scandate" - - // grantPrefix is the prefix for sharing related extended attributes - GrantPrefix string = OcisPrefix + "grant." - GrantUserAcePrefix string = OcisPrefix + "grant." 
+ UserAcePrefix - GrantGroupAcePrefix string = OcisPrefix + "grant." + GroupAcePrefix - MetadataPrefix string = OcisPrefix + "md." - - // favorite flag, per user - FavPrefix string = OcisPrefix + "fav." - - // a temporary etag for a folder that is removed when the mtime propagation happens - TmpEtagAttr string = OcisPrefix + "tmp.etag" - ReferenceAttr string = OcisPrefix + "cs3.ref" // arbitrary metadata - ChecksumPrefix string = OcisPrefix + "cs." // followed by the algorithm, eg. ocis.cs.sha1 - TrashOriginAttr string = OcisPrefix + "trash.origin" // trash origin - - // we use a single attribute to enable or disable propagation of both: synctime and treesize - // The propagation attribute is set to '1' at the top of the (sub)tree. Propagation will stop at - // that node. - PropagationAttr string = OcisPrefix + "propagation" - - // the tree modification time of the tree below this node, - // propagated when synctime_accounting is true and - // user.ocis.propagation=1 is set - // stored as a readable time.RFC3339Nano - TreeMTimeAttr string = OcisPrefix + "tmtime" - - // the deletion/disabled time of a space or node - // used to mark space roots as disabled - // stored as a readable time.RFC3339Nano - DTimeAttr string = OcisPrefix + "dtime" - - // the size of the tree below this node, - // propagated when treesize_accounting is true and - // user.ocis.propagation=1 is set - // stored as uint64, little endian - TreesizeAttr string = OcisPrefix + "treesize" - - // the quota for the storage space / tree, regardless who accesses it - QuotaAttr string = OcisPrefix + "quota" - - // the name given to a storage space. It should not contain any semantics as its only purpose is to be read. - SpaceNameAttr string = OcisPrefix + "space.name" - SpaceTypeAttr string = OcisPrefix + "space.type" - SpaceDescriptionAttr string = OcisPrefix + "space.description" - SpaceReadmeAttr string = OcisPrefix + "space.readme" - SpaceImageAttr string = OcisPrefix + "space.image" - SpaceAliasAttr string = OcisPrefix + "space.alias" - - UserAcePrefix string = "u:" - GroupAcePrefix string = "g:" ) // ReferenceFromAttr returns a CS3 reference from xattr of a node. @@ -167,7 +86,7 @@ func CopyMetadataWithSourceLock(src, target string, filter func(attributeName st // both locks are established. Copy. 
var attrNameList []string - if attrNameList, err = xattr.List(src); err != nil { + if attrNameList, err = backend.List(src); err != nil { return errors.Wrap(err, "Can not get xattr listing on src") } @@ -180,11 +99,11 @@ func CopyMetadataWithSourceLock(src, target string, filter func(attributeName st for idx := range attrNameList { attrName := attrNameList[idx] if filter == nil || filter(attrName) { - var attrVal []byte - if attrVal, xerr = xattr.Get(src, attrName); xerr != nil { + var attrVal string + if attrVal, xerr = backend.Get(src, attrName); xerr != nil { xerrs++ } - if xerr = xattr.Set(target, attrName, attrVal); xerr != nil { + if xerr = backend.Set(target, attrName, attrVal); xerr != nil { xerrs++ } } @@ -211,7 +130,7 @@ func Set(filePath string, key string, val string) (err error) { } }() - return xattr.Set(filePath, key, []byte(val)) + return SetWithLock(filePath, key, val, fileLock) } // SetWithLock an extended attribute key to the given value with an existing lock @@ -226,26 +145,12 @@ func SetWithLock(filePath string, key string, val string, fileLock *flock.Flock) return errors.New("not write locked") } - return xattr.Set(filePath, key, []byte(val)) + return backend.Set(filePath, key, val) } // Remove an extended attribute key func Remove(filePath string, key string) (err error) { - fileLock, err := filelocks.AcquireWriteLock(filePath) - - if err != nil { - return errors.Wrap(err, "xattrs: Can not acquire write log") - } - defer func() { - rerr := filelocks.ReleaseLock(fileLock) - - // if err is non nil we do not overwrite that - if err == nil { - err = rerr - } - }() - - return xattr.Remove(filePath, key) + return backend.Remove(filePath, key) } // SetMultiple allows setting multiple key value pairs at once @@ -282,103 +187,41 @@ func SetMultipleWithLock(filePath string, attribs map[string]string, fileLock *f return errors.New("not locked") } - // error handling: Count if there are errors while setting the attribs. - // if there were any, return an error. - var ( - xerrs = 0 - xerr error - ) - for key, val := range attribs { - if xerr = xattr.Set(filePath, key, []byte(val)); xerr != nil { - // log - xerrs++ - } - } - if xerrs > 0 { - err = errors.Wrap(xerr, "Failed to set all xattrs") - } - return err + return backend.SetMultiple(filePath, attribs) +} + +// All reads all extended attributes for a node +func All(path string) (map[string]string, error) { + return backend.All(path) } // Get an extended attribute value for the given key -// No file locking is involved here as reading a single xattr is -// considered to be atomic. -func Get(filePath, key string) (string, error) { - v, err := xattr.Get(filePath, key) - if err != nil { - return "", err - } - val := string(v) - return val, nil +func Get(path, key string) (string, error) { + return backend.Get(path, key) } // GetInt64 reads a string as int64 from the xattrs -func GetInt64(filePath, key string) (int64, error) { - attr, err := Get(filePath, key) - if err != nil { - return 0, err - } - v, err := strconv.ParseInt(attr, 10, 64) - if err != nil { - return 0, err - } - return v, nil +func GetInt64(path, key string) (int64, error) { + return backend.GetInt64(path, key) } // List retrieves a list of names of extended attributes associated with the // given path in the file system. -func List(filePath string) (attribs []string, err error) { - attrs, err := xattr.List(filePath) - if err == nil { - return attrs, nil - } - - // listing the attributes failed. 
lock the file and try again - readLock, err := filelocks.AcquireReadLock(filePath) - - if err != nil { - return nil, errors.Wrap(err, "xattrs: Unable to lock file for read") - } - defer func() { - rerr := filelocks.ReleaseLock(readLock) - - // if err is non nil we do not overwrite that - if err == nil { - err = rerr - } - }() - - return xattr.List(filePath) +func List(path string) ([]string, error) { + return backend.List(path) } -// All reads all extended attributes for a node, protected by a -// shared file lock -func All(filePath string) (attribs map[string]string, err error) { - attrNames, err := List(filePath) - - if err != nil { - return nil, err - } - - var ( - xerrs = 0 - xerr error - ) - // error handling: Count if there are errors while reading all attribs. - // if there were any, return an error. - attribs = make(map[string]string, len(attrNames)) - for _, name := range attrNames { - var val []byte - if val, xerr = xattr.Get(filePath, name); xerr != nil { - xerrs++ - } else { - attribs[name] = string(val) - } - } +// MetadataPath returns the path of the file holding the metadata for the given path +func MetadataPath(path string) string { + return backend.MetadataPath(path) +} - if xerrs > 0 { - err = errors.Wrap(xerr, "Failed to read all xattrs") - } +// UsesExternalMetadataFile returns true when the backend uses external metadata files +func UsesExternalMetadataFile() bool { + return backend.UsesExternalMetadataFile() +} - return attribs, err +// IsMetaFile returns whether the given path represents a meta file +func IsMetaFile(path string) bool { + return backend.IsMetaFile(path) } diff --git a/pkg/storage/utils/decomposedfs/xattrs/xattrs_prefix.go b/pkg/storage/utils/decomposedfs/xattrs/xattrs_prefix.go deleted file mode 100644 index c8277ae58c..0000000000 --- a/pkg/storage/utils/decomposedfs/xattrs/xattrs_prefix.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !freebsd - -package xattrs - -// The default namespace for ocis. As non root users can only manipulate -// the user. namespace, which is what is used to store ownCloud specific -// metadata. To prevent name collisions with other apps, we are going to -// introduce a sub namespace "user.ocis." -const ( - OcisPrefix string = "user.ocis." -) diff --git a/pkg/storage/utils/decomposedfs/xattrs/xattrs_prefix_freebsd.go b/pkg/storage/utils/decomposedfs/xattrs/xattrs_prefix_freebsd.go deleted file mode 100644 index 4dd2323590..0000000000 --- a/pkg/storage/utils/decomposedfs/xattrs/xattrs_prefix_freebsd.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build freebsd - -package xattrs - -// On FreeBSD the `user` namespace is implied through a separate syscall argument -// and will fail with invalid argument when you try to start an xattr name with user. or system. -// For that reason we drop the superfluous user. prefix for FreeBSD specifically. -const ( - OcisPrefix string = "ocis." -) diff --git a/pkg/storage/utils/decomposedfs/xattrs/xattrs_suite_test.go b/pkg/storage/utils/decomposedfs/xattrs/xattrs_suite_test.go new file mode 100644 index 0000000000..f663680a8f --- /dev/null +++ b/pkg/storage/utils/decomposedfs/xattrs/xattrs_suite_test.go @@ -0,0 +1,13 @@ +package xattrs_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestXattrs(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Xattrs Suite") +}
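The IniBackend above stores every attribute in a flat, section-less `.ini` file next to the node. Because keys whose prefix appears in `encodedPrefixes` (the exact prefix list is not part of this excerpt) may carry arbitrary bytes, `Set`/`SetMultiple` base64-encode those values before writing. The matching decode on the read path is not visible in this hunk, but a symmetric round trip would look roughly like the following sketch; the key name and payload are made up:

// Minimal sketch (not part of the PR): round-tripping a value for an
// "encoded" key through a flat, section-less ini document, mirroring
// what IniBackend.Set does on write.
package main

import (
	"encoding/base64"
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	f := ini.Empty()

	// Write path: values under certain prefixes are base64-encoded
	// before they are stored in the ini file.
	key := "user.ocis.cs3.ref"             // hypothetical attribute name
	val := []byte("binary\x00value")        // may contain bytes unsafe for ini
	f.Section("").Key(key).SetValue(base64.StdEncoding.EncodeToString(val))

	// Read path: decode on the way out.
	raw := f.Section("").Key(key).Value()
	decoded, err := base64.StdEncoding.DecodeString(raw)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", decoded) // "binary\x00value"
}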
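`loadIni` avoids re-parsing the metadata file on every call by caching the parsed `*ini.File` together with the file's mtime, re-reading only when `os.Stat` reports a different modification time; `saveIni` refreshes that cache entry after each write. The sketch below isolates that pattern with a plain map and without the `lockedfile`-based locking or the backend's `metaCache`, so it is a simplification of the idea, not the PR's implementation:

// Sketch (assumption-laden): mtime-based cache check as used by
// IniBackend.loadIni, reduced to a plain map and no file locking.
package main

import (
	"fmt"
	"os"
	"time"

	"gopkg.in/ini.v1"
)

type entry struct {
	mtime time.Time
	meta  *ini.File
}

var cache = map[string]entry{}

// load re-parses the file only when its mtime differs from the cached one.
func load(path string) (*ini.File, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return nil, err
	}
	if e, ok := cache[path]; ok && e.mtime.Equal(fi.ModTime()) {
		return e.meta, nil // still fresh, skip re-parsing
	}
	f, err := ini.Load(path)
	if err != nil {
		return nil, err
	}
	cache[path] = entry{mtime: fi.ModTime(), meta: f}
	return f, nil
}

func main() {
	tmp, err := os.CreateTemp("", "meta-*.ini")
	if err != nil {
		panic(err)
	}
	defer os.Remove(tmp.Name())
	fmt.Fprintln(tmp, "user.ocis.name = report.pdf")
	tmp.Close()

	for i := 0; i < 2; i++ { // the second call is served from the cache
		f, err := load(tmp.Name())
		if err != nil {
			panic(err)
		}
		fmt.Println(f.Section("").Key("user.ocis.name").String())
	}
}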
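The new `prefixes` package keeps the attribute-name constants that previously lived in the `xattrs` package, including the platform-specific `OcisPrefix` (with the `user.` namespace on Linux, without it on FreeBSD). A per-user grant attribute, for example, is composed from these constants as shown in this small illustration; the user id is invented:

// Illustration only: composing a per-user grant attribute name from the
// constants in the prefixes package.
package main

import (
	"fmt"

	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/prefixes"
)

func main() {
	userID := "4c510ada-c86b-4815-8820-42cdf82c3d51" // hypothetical opaque id
	attr := prefixes.GrantUserAcePrefix + userID

	// On Linux this prints "user.ocis.grant.u:<id>",
	// on FreeBSD "ocis.grant.u:<id>".
	fmt.Println(attr)
}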
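After the refactor, callers keep using the package-level helpers in `xattrs` (`Get`, `Set`, `List`, `All`, `MetadataPath`, ...), which now simply delegate to the configured backend. A hypothetical caller-side sketch, assuming a node path that already exists on disk (the path and value are made up):

// Hypothetical usage sketch: storage code stays backend-agnostic and goes
// through the xattrs facade; which backend persists the data is decided by
// configuration elsewhere.
package main

import (
	"fmt"

	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs"
	"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/xattrs/prefixes"
)

func main() {
	nodePath := "/var/lib/ocis/storage/nodes/abcd" // hypothetical node path

	// Writes and reads go through the backend-agnostic helpers.
	if err := xattrs.Set(nodePath, prefixes.NameAttr, "report.pdf"); err != nil {
		fmt.Println("set failed:", err)
		return
	}
	name, err := xattrs.Get(nodePath, prefixes.NameAttr)
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	fmt.Println(name)

	// With the ini backend this resolves to nodePath+".ini",
	// with the xattrs backend it is the node path itself.
	fmt.Println(xattrs.MetadataPath(nodePath))
}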