diff --git a/internal/grpc/services/sharesstorageprovider/sharesstorageprovider.go b/internal/grpc/services/sharesstorageprovider/sharesstorageprovider.go index 196cb1108e6..17f30f19abd 100644 --- a/internal/grpc/services/sharesstorageprovider/sharesstorageprovider.go +++ b/internal/grpc/services/sharesstorageprovider/sharesstorageprovider.go @@ -24,7 +24,6 @@ import ( "path/filepath" "strings" - "github.com/cs3org/reva/v2/pkg/share" "github.com/cs3org/reva/v2/pkg/storagespace" "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -352,10 +351,10 @@ func (s *service) ListStorageSpaces(ctx context.Context, req *provider.ListStora } var receivedShares []*collaboration.ReceivedShare - var shareMd map[string]share.Metadata + var shareInfo map[string]*provider.ResourceInfo var err error if fetchShares { - receivedShares, shareMd, err = s.fetchShares(ctx) + receivedShares, shareInfo, err = s.fetchShares(ctx) if err != nil { return nil, errors.Wrap(err, "sharesstorageprovider: error calling ListReceivedSharesRequest") } @@ -372,13 +371,13 @@ func (s *service) ListStorageSpaces(ctx context.Context, req *provider.ListStora OpaqueId: utils.ShareStorageProviderID, } if spaceID == nil || isShareJailRoot(spaceID) { - earliestShare, atLeastOneAccepted := findEarliestShare(receivedShares, shareMd) + earliestShare, atLeastOneAccepted := findEarliestShare(receivedShares, shareInfo) var opaque *typesv1beta1.Opaque var mtime *typesv1beta1.Timestamp if earliestShare != nil { - if md, ok := shareMd[earliestShare.Id.OpaqueId]; ok { - mtime = md.Mtime - opaque = utils.AppendPlainToOpaque(opaque, "etag", md.ETag) + if info, ok := shareInfo[earliestShare.Id.OpaqueId]; ok { + mtime = info.Mtime + opaque = utils.AppendPlainToOpaque(opaque, "etag", info.Etag) } } // only display the shares jail if we have accepted shares @@ -407,20 +406,16 @@ func (s *service) ListStorageSpaces(ctx context.Context, req *provider.ListStora // none of our business continue } - var opaque *typesv1beta1.Opaque - if md, ok := shareMd[receivedShare.Share.Id.OpaqueId]; ok { - opaque = utils.AppendPlainToOpaque(opaque, "etag", md.ETag) - } // we know a grant for this resource space := &provider.StorageSpace{ - Opaque: opaque, Id: &provider.StorageSpaceId{ OpaqueId: storagespace.FormatResourceID(*root), }, SpaceType: "grant", Owner: &userv1beta1.User{Id: receivedShare.Share.Owner}, // the sharesstorageprovider keeps track of mount points - Root: root, + Root: root, + RootInfo: shareInfo[receivedShare.Share.Id.OpaqueId], } res.StorageSpaces = append(res.StorageSpaces, space) @@ -448,9 +443,7 @@ func (s *service) ListStorageSpaces(ctx context.Context, req *provider.ListStora } } var opaque *typesv1beta1.Opaque - if md, ok := shareMd[receivedShare.Share.Id.OpaqueId]; ok { - opaque = utils.AppendPlainToOpaque(opaque, "etag", md.ETag) - } else { + if _, ok := shareInfo[receivedShare.Share.Id.OpaqueId]; !ok { // we could not stat the share, skip it continue } @@ -473,7 +466,8 @@ func (s *service) ListStorageSpaces(ctx context.Context, req *provider.ListStora SpaceType: "mountpoint", Owner: &userv1beta1.User{Id: receivedShare.Share.Owner}, // FIXME actually, the mount point belongs to the recipient // the sharesstorageprovider keeps track of mount points - Root: root, + Root: root, + RootInfo: shareInfo[receivedShare.Share.Id.OpaqueId], } // TODO in the future the spaces registry will handle the alias for share spaces. 
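The hunks above replace the per-share `share.Metadata` map (etag + mtime only) with a `map[string]*provider.ResourceInfo` and attach the stat result to each returned space as `RootInfo`, so consumers no longer need the opaque `etag` entry. A minimal, self-contained sketch of that pattern; `statShare`, `collectShareInfo` and `toGrantSpace` are illustrative stand-ins, not code from the patch:

```go
package sketch

import (
	collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1"
	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
)

// statShare stands in for the gateway Stat call the real fetchShares
// performs for every accepted share.
type statShare func(rs *collaboration.ReceivedShare) (*provider.ResourceInfo, error)

// collectShareInfo mirrors the new fetchShares contract: keep the whole
// ResourceInfo keyed by share id instead of a trimmed etag/mtime struct.
func collectShareInfo(shares []*collaboration.ReceivedShare, stat statShare) map[string]*provider.ResourceInfo {
	shareInfo := make(map[string]*provider.ResourceInfo, len(shares))
	for _, rs := range shares {
		// only accepted shares are stated, as in the patched code
		if rs.State != collaboration.ShareState_SHARE_STATE_ACCEPTED {
			continue
		}
		info, err := stat(rs)
		if err != nil {
			continue // unreachable shares are simply skipped
		}
		shareInfo[rs.Share.Id.OpaqueId] = info
	}
	return shareInfo
}

// toGrantSpace shows how the stat result now travels with the space itself
// instead of being copied into an opaque "etag" entry.
func toGrantSpace(rs *collaboration.ReceivedShare, root *provider.ResourceId, shareInfo map[string]*provider.ResourceInfo) *provider.StorageSpace {
	return &provider.StorageSpace{
		SpaceType: "grant",
		Root:      root,
		RootInfo:  shareInfo[rs.Share.Id.OpaqueId],
	}
}
```

In ListStorageSpaces the same map also drives findEarliestShare and the shares-jail etag/mtime, so a single stat per accepted share serves both purposes.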
@@ -690,7 +684,7 @@ func (s *service) Stat(ctx context.Context, req *provider.StatRequest) (*provide PermissionSet: &provider.ResourcePermissions{ // TODO }, - Etag: shareMd[earliestShare.Id.OpaqueId].ETag, + Etag: shareMd[earliestShare.Id.OpaqueId].Etag, }, }, nil } @@ -1009,7 +1003,7 @@ func (s *service) rejectReceivedShare(ctx context.Context, receivedShare *collab return errtypes.NewErrtypeFromStatus(res.Status) } -func (s *service) fetchShares(ctx context.Context) ([]*collaboration.ReceivedShare, map[string]share.Metadata, error) { +func (s *service) fetchShares(ctx context.Context) ([]*collaboration.ReceivedShare, map[string]*provider.ResourceInfo, error) { lsRes, err := s.sharesProviderClient.ListReceivedShares(ctx, &collaboration.ListReceivedSharesRequest{ // FIXME filter by received shares for resource id - listing all shares is tooo expensive! }) @@ -1020,7 +1014,7 @@ func (s *service) fetchShares(ctx context.Context) ([]*collaboration.ReceivedSha return nil, nil, fmt.Errorf("sharesstorageprovider: error calling ListReceivedSharesRequest") } - shareMetaData := make(map[string]share.Metadata, len(lsRes.Shares)) + shareMetaData := make(map[string]*provider.ResourceInfo, len(lsRes.Shares)) for _, rs := range lsRes.Shares { // only stat accepted shares if rs.State != collaboration.ShareState_SHARE_STATE_ACCEPTED { @@ -1041,13 +1035,13 @@ func (s *service) fetchShares(ctx context.Context) ([]*collaboration.ReceivedSha Msg("ListRecievedShares: failed to stat the resource") continue } - shareMetaData[rs.Share.Id.OpaqueId] = share.Metadata{ETag: sRes.Info.Etag, Mtime: sRes.Info.Mtime} + shareMetaData[rs.Share.Id.OpaqueId] = sRes.Info } return lsRes.Shares, shareMetaData, nil } -func findEarliestShare(receivedShares []*collaboration.ReceivedShare, shareMd map[string]share.Metadata) (earliestShare *collaboration.Share, atLeastOneAccepted bool) { +func findEarliestShare(receivedShares []*collaboration.ReceivedShare, shareInfo map[string]*provider.ResourceInfo) (earliestShare *collaboration.Share, atLeastOneAccepted bool) { for _, rs := range receivedShares { var hasCurrentMd bool var hasEarliestMd bool @@ -1059,10 +1053,10 @@ func findEarliestShare(receivedShares []*collaboration.ReceivedShare, shareMd ma // We cannot assume that every share has metadata if current.Id != nil { - _, hasCurrentMd = shareMd[current.Id.OpaqueId] + _, hasCurrentMd = shareInfo[current.Id.OpaqueId] } if earliestShare != nil && earliestShare.Id != nil { - _, hasEarliestMd = shareMd[earliestShare.Id.OpaqueId] + _, hasEarliestMd = shareInfo[earliestShare.Id.OpaqueId] } switch { @@ -1071,10 +1065,10 @@ func findEarliestShare(receivedShares []*collaboration.ReceivedShare, shareMd ma // ignore if one of the shares has no metadata case !hasEarliestMd || !hasCurrentMd: continue - case shareMd[current.Id.OpaqueId].Mtime.Seconds > shareMd[earliestShare.Id.OpaqueId].Mtime.Seconds: + case shareInfo[current.Id.OpaqueId].Mtime.Seconds > shareInfo[earliestShare.Id.OpaqueId].Mtime.Seconds: earliestShare = current - case shareMd[current.Id.OpaqueId].Mtime.Seconds == shareMd[earliestShare.Id.OpaqueId].Mtime.Seconds && - shareMd[current.Id.OpaqueId].Mtime.Nanos > shareMd[earliestShare.Id.OpaqueId].Mtime.Nanos: + case shareInfo[current.Id.OpaqueId].Mtime.Seconds == shareInfo[earliestShare.Id.OpaqueId].Mtime.Seconds && + shareInfo[current.Id.OpaqueId].Mtime.Nanos > shareInfo[earliestShare.Id.OpaqueId].Mtime.Nanos: earliestShare = current } } diff --git a/internal/http/services/owncloud/ocdav/locks.go 
b/internal/http/services/owncloud/ocdav/locks.go index 10f7fd6f067..64c922d03da 100644 --- a/internal/http/services/owncloud/ocdav/locks.go +++ b/internal/http/services/owncloud/ocdav/locks.go @@ -400,22 +400,12 @@ func (s *svc) handleSpacesLock(w http.ResponseWriter, r *http.Request, spaceID s span.SetAttributes(attribute.String("component", "ocdav")) - client, err := s.getClient() - if err != nil { - return http.StatusInternalServerError, err + // build storage space reference + ref := spacelookup.MakeStorageSpaceReference(spaceID, r.URL.Path) + if ref == nil { + return http.StatusBadRequest, fmt.Errorf("invalid space id") } - // retrieve a specific storage space - space, cs3Status, err := spacelookup.LookUpStorageSpaceByID(ctx, client, spaceID) - if err != nil { - return http.StatusInternalServerError, err - } - if cs3Status.Code != rpc.Code_CODE_OK { - return http.StatusInternalServerError, errtypes.NewErrtypeFromStatus(cs3Status) - } - - ref := spacelookup.MakeRelativeReference(space, r.URL.Path, true) - return s.lockReference(ctx, w, r, ref) } diff --git a/internal/http/services/owncloud/ocdav/propfind/propfind.go b/internal/http/services/owncloud/ocdav/propfind/propfind.go index 0f7657c05f9..038a7e61f10 100644 --- a/internal/http/services/owncloud/ocdav/propfind/propfind.go +++ b/internal/http/services/owncloud/ocdav/propfind/propfind.go @@ -51,11 +51,12 @@ import ( "github.com/cs3org/reva/v2/pkg/utils" "github.com/rs/zerolog" "go.opentelemetry.io/otel/codes" + "google.golang.org/protobuf/types/known/fieldmaskpb" ) const ( - tracerName = "ocdav" - // _spaceTypeProject = "project" + tracerName = "ocdav" + _spaceTypeProject = "project" ) type countingReader struct { @@ -200,6 +201,7 @@ func (p *Handler) HandlePathPropfind(w http.ResponseWriter, r *http.Request, ns return } + // TODO look up all spaces and request the root_info in the field mask spaces, rpcStatus, err := spacelookup.LookUpStorageSpacesForPathWithChildren(ctx, client, fn) if err != nil { sublog.Error().Err(err).Msg("error sending a grpc request") @@ -212,28 +214,12 @@ func (p *Handler) HandlePathPropfind(w http.ResponseWriter, r *http.Request, ns return } - /*var root *provider.StorageSpace - - switch { - case len(spaces) == 1: - root = spaces[0] - case len(spaces) > 1: - for _, space := range spaces { - if isVirtualRootResourceID(space.Root) { - root = space - } - } - if root == nil { - root = spaces[0] - } - }*/ - - resourceInfos, sendTusHeaders, ok := p.getResourceInfos(ctx, w, r, pf, spaces, fn, false, sublog) + resourceInfos, sendTusHeaders, ok := p.getResourceInfos(ctx, w, r, pf, spaces, fn, sublog) if !ok { // getResourceInfos handles responses in case of an error so we can just return here. 
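The locks.go change above drops the `LookUpStorageSpaceByID` round trip and builds the reference directly from the space id and the request path. A rough sketch of what such a helper boils down to, assuming `spacelookup.MakeStorageSpaceReference` follows the (space id, relative path) shape used later in propfind.go; the real implementation may differ:

```go
package sketch

import (
	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"

	"github.com/cs3org/reva/v2/pkg/storagespace"
	"github.com/cs3org/reva/v2/pkg/utils"
)

// makeSpaceRef illustrates the idea: parse the space id into a resource id
// and pair it with a path relative to the space root, so no lookup request
// to the registry is needed just to address a resource inside the space.
func makeSpaceRef(spaceID, relativePath string) (provider.Reference, error) {
	rid, err := storagespace.ParseID(spaceID)
	if err != nil {
		return provider.Reference{}, err
	}
	return provider.Reference{
		ResourceId: &rid,
		Path:       utils.MakeRelativePath(relativePath),
	}, nil
}
```

Note that locks.go checks the returned reference for nil while propfind.go expects a `(ref, err)` pair; the sketch follows the latter form.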
return } - p.propfindResponse(ctx, w, r, ns /*root.SpaceType,*/, pf, sendTusHeaders, resourceInfos, sublog) + p.propfindResponse(ctx, w, r, ns, pf, sendTusHeaders, resourceInfos, sublog) } // HandleSpacesPropfind handles a spaces based propfind request @@ -242,13 +228,17 @@ func (p *Handler) HandleSpacesPropfind(w http.ResponseWriter, r *http.Request, s defer span.End() sublog := appctx.GetLogger(ctx).With().Str("path", r.URL.Path).Str("spaceid", spaceID).Logger() - /*client, err := p.getClient() + dh := r.Header.Get(net.HeaderDepth) + + depth, err := net.ParseDepth(dh) if err != nil { - sublog.Error().Err(err).Msg("error getting grpc client") - w.WriteHeader(http.StatusInternalServerError) + sublog.Debug().Str("depth", dh).Msg(err.Error()) + w.WriteHeader(http.StatusBadRequest) + m := fmt.Sprintf("Invalid Depth header value: %v", dh) + b, err := errors.Marshal(http.StatusBadRequest, m, "") + errors.HandleWebdavError(&sublog, w, b, err) return } - */ pf, status, err := ReadPropfind(r.Body) if err != nil { @@ -257,58 +247,94 @@ func (p *Handler) HandleSpacesPropfind(w http.ResponseWriter, r *http.Request, s return } - /* - // retrieve a specific storage space - space, rpcStatus, err := spacelookup.LookUpStorageSpaceByID(ctx, client, spaceID) - if err != nil { - sublog.Error().Err(err).Msg("error looking up the space by id") - w.WriteHeader(http.StatusInternalServerError) - return - } - - if rpcStatus.Code != rpc.Code_CODE_OK { - errors.HandleErrorStatus(&sublog, w, rpcStatus) - return - } - */ - - resourceID, err := storagespace.ParseID(spaceID) + ref, err := spacelookup.MakeStorageSpaceReference(spaceID, r.URL.Path) if err != nil { - sublog.Debug().Str("spaceID", spaceID).Msg(err.Error()) + w.WriteHeader(http.StatusBadRequest) + sublog.Debug().Msg("invalid space id") w.WriteHeader(http.StatusBadRequest) m := fmt.Sprintf("Invalid space id: %v", spaceID) b, err := errors.Marshal(http.StatusBadRequest, m, "") errors.HandleWebdavError(&sublog, w, b, err) return } - // fake a space root - root := &provider.StorageSpace{ - Opaque: &typesv1beta1.Opaque{ - Map: map[string]*typesv1beta1.OpaqueEntry{ - "path": { - Decoder: "plain", - Value: []byte("/"), + + client, err := p.getClient() + if err != nil { + sublog.Error().Err(err).Msg("error getting grpc client") + w.WriteHeader(http.StatusInternalServerError) + return + } + + // stat the reference and request the space in the field mask + res, err := client.Stat(ctx, &provider.StatRequest{ + Ref: &ref, + ArbitraryMetadataKeys: metadataKeys(pf), + FieldMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}}, + }) + if err != nil { + sublog.Error().Err(err).Msg("error getting grpc client") + w.WriteHeader(http.StatusInternalServerError) + return + } + if res.Status.Code != rpc.Code_CODE_OK { + errors.HandleErrorStatus(&sublog, w, res.Status) + return + } + var space *provider.StorageSpace + if res.Info.Space == nil { + sublog.Debug().Msg("stat did not include a space, executing an additional lookup request") + // TODO look up space? hm can the isShared check even work when stating a space? hm yeah ... well ... 
*mindblown* + // fake a space root + space = &provider.StorageSpace{ + Id: &provider.StorageSpaceId{OpaqueId: spaceID}, + Opaque: &typesv1beta1.Opaque{ + Map: map[string]*typesv1beta1.OpaqueEntry{ + "path": { + Decoder: "plain", + Value: []byte("/"), + }, }, }, - }, - Root: &resourceID, + Root: ref.ResourceId, + RootInfo: res.Info, + } } - resourceInfos, sendTusHeaders, ok := p.getResourceInfos(ctx, w, r, pf, []*provider.StorageSpace{root /*space*/}, r.URL.Path, true, sublog) - if !ok { - // getResourceInfos handles responses in case of an error so we can just return here. - return + + resourceInfos := []*provider.ResourceInfo{ + res.Info, + } + if res.Info.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER && depth != net.DepthZero { + childInfos, ok := p.getSpaceResourceInfos(ctx, w, r, pf, &ref, r.URL.Path, depth, sublog) + if !ok { + // getResourceInfos handles responses in case of an error so we can just return here. + return + } + resourceInfos = append(resourceInfos, childInfos...) } // prefix space id to paths for i := range resourceInfos { resourceInfos[i].Path = path.Join("/", spaceID, resourceInfos[i].Path) + // add space to info so propfindResponse can access space type + if resourceInfos[i].Space == nil { + resourceInfos[i].Space = space + } } - p.propfindResponse(ctx, w, r, "" /*space.SpaceType,*/, pf, sendTusHeaders, resourceInfos, sublog) + sendTusHeaders := true + // let clients know this collection supports tus.io POST requests to start uploads + if res.Info.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + if res.Info.Opaque != nil { + _, ok := res.Info.Opaque.Map["disable_tus"] + sendTusHeaders = !ok + } + } + + p.propfindResponse(ctx, w, r, "", pf, sendTusHeaders, resourceInfos, sublog) } -func (p *Handler) propfindResponse(ctx context.Context, w http.ResponseWriter, r *http.Request, namespace /*, spaceType*/ string, pf XML, sendTusHeaders bool, resourceInfos []*provider.ResourceInfo, log zerolog.Logger) { +func (p *Handler) propfindResponse(ctx context.Context, w http.ResponseWriter, r *http.Request, namespace string, pf XML, sendTusHeaders bool, resourceInfos []*provider.ResourceInfo, log zerolog.Logger) { ctx, span := appctx.GetTracerProvider(r.Context()).Tracer(tracerName).Start(ctx, "propfind_response") defer span.End() @@ -340,7 +366,7 @@ func (p *Handler) propfindResponse(ctx context.Context, w http.ResponseWriter, r } } - propRes, err := MultistatusResponse(ctx, &pf, resourceInfos, p.PublicURL, namespace /*spaceType,*/, linkshares) + propRes, err := MultistatusResponse(ctx, &pf, resourceInfos, p.PublicURL, namespace, linkshares) if err != nil { log.Error().Err(err).Msg("error formatting propfind") w.WriteHeader(http.StatusInternalServerError) @@ -374,7 +400,7 @@ func (p *Handler) statSpace(ctx context.Context, client gateway.GatewayAPIClient return res.GetInfo(), res.GetStatus(), nil } -func (p *Handler) getResourceInfos(ctx context.Context, w http.ResponseWriter, r *http.Request, pf XML, spaces []*provider.StorageSpace, requestPath string, spacesPropfind bool, log zerolog.Logger) ([]*provider.ResourceInfo, bool, bool) { +func (p *Handler) getResourceInfos(ctx context.Context, w http.ResponseWriter, r *http.Request, pf XML, spaces []*provider.StorageSpace, requestPath string, log zerolog.Logger) ([]*provider.ResourceInfo, bool, bool) { dh := r.Header.Get(net.HeaderDepth) depth, err := net.ParseDepth(dh) if err != nil { @@ -393,23 +419,7 @@ func (p *Handler) getResourceInfos(ctx context.Context, w http.ResponseWriter, r return nil, false, false } - 
var metadataKeys []string - - if pf.Allprop != nil { - // TODO this changes the behavior and returns all properties if allprops has been set, - // but allprops should only return some default properties - // see https://tools.ietf.org/html/rfc4918#section-9.1 - // the description of arbitrary_metadata_keys in https://cs3org.github.io/cs3apis/#cs3.storage.provider.v1beta1.ListContainerRequest an others may need clarification - // tracked in https://github.com/cs3org/cs3apis/issues/104 - metadataKeys = append(metadataKeys, "*") - } else { - metadataKeys = make([]string, 0, len(pf.Prop)) - for i := range pf.Prop { - if requiresExplicitFetching(&pf.Prop[i]) { - metadataKeys = append(metadataKeys, metadataKeyOf(&pf.Prop[i])) - } - } - } + metadataKeys := metadataKeys(pf) // we need to stat all spaces to aggregate the root etag, mtime and size // TODO cache per space (hah, no longer per user + per space!) @@ -424,24 +434,37 @@ func (p *Handler) getResourceInfos(ctx context.Context, w http.ResponseWriter, r if spacePath = utils.ReadPlainFromOpaque(space.Opaque, "path"); spacePath == "" { continue // not mounted } + if space.RootInfo == nil { + spaceRef, err := spacelookup.MakeStorageSpaceReference(space.Id.OpaqueId, ".") + if err != nil { + continue + } + info, status, err := p.statSpace(ctx, client, &spaceRef, metadataKeys) + if err != nil || status.GetCode() != rpc.Code_CODE_OK { + continue + } + space.RootInfo = info + } // TODO separate stats to the path or to the children, after statting all children update the mtime/etag // TODO get mtime, and size from space as well, so we no longer have to stat here? would require sending the requested metadata keys as well // root should be a ResourceInfo so it can contain the full stat, not only the id ... do we even need spaces then? // metadata keys could all be prefixed with "root." to indicate we want more than the root id ... - spaceRef := spacelookup.MakeRelativeReference(space, requestPath, spacesPropfind) - info, status, err := p.statSpace(ctx, client, spaceRef, metadataKeys) - if err != nil || status.GetCode() != rpc.Code_CODE_OK { - continue + // TODO can we reuse the space.rootinfo? + spaceRef := spacelookup.MakeRelativeReference(space, requestPath, false) + var info *provider.ResourceInfo + if spaceRef.Path == "." 
&& utils.ResourceIDEqual(spaceRef.ResourceId, space.Root) { + info = space.RootInfo + } else { + var status *rpc.Status + info, status, err = p.statSpace(ctx, client, spaceRef, metadataKeys) + if err != nil || status.GetCode() != rpc.Code_CODE_OK { + continue + } } // adjust path - if spacesPropfind { - // we need to prefix the path with / to make subsequent prefix matches work - info.Path = filepath.Join("/", spaceRef.Path) - } else { - info.Path = filepath.Join(spacePath, spaceRef.Path) - } + info.Path = filepath.Join(spacePath, spaceRef.Path) spaceMap[info] = spaceData{Ref: spaceRef, SpaceType: space.SpaceType} @@ -496,12 +519,12 @@ func (p *Handler) getResourceInfos(ctx context.Context, w http.ResponseWriter, r childInfos := map[string]*provider.ResourceInfo{} for spaceInfo, spaceData := range spaceMap { switch { - case !spacesPropfind && spaceInfo.Type != provider.ResourceType_RESOURCE_TYPE_CONTAINER && depth != net.DepthInfinity: + case spaceInfo.Type != provider.ResourceType_RESOURCE_TYPE_CONTAINER && depth != net.DepthInfinity: addChild(childInfos, spaceInfo, requestPath, rootInfo) case spaceInfo.Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER && depth == net.DepthOne: switch { - case strings.HasPrefix(requestPath, spaceInfo.Path) && (spacesPropfind || spaceData.SpaceType != "virtual"): + case strings.HasPrefix(requestPath, spaceInfo.Path) && spaceData.SpaceType != "virtual": req := &provider.ListContainerRequest{ Ref: spaceData.Ref, ArbitraryMetadataKeys: metadataKeys, @@ -562,9 +585,6 @@ func (p *Handler) getResourceInfos(ctx context.Context, w http.ResponseWriter, r for i := len(res.Infos) - 1; i >= 0; i-- { // add path to resource res.Infos[i].Path = filepath.Join(info.Path, res.Infos[i].Path) - if spacesPropfind { - res.Infos[i].Path = utils.MakeRelativePath(res.Infos[i].Path) - } if res.Infos[i].Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { stack = append(stack, res.Infos[i]) } @@ -596,6 +616,112 @@ func (p *Handler) getResourceInfos(ctx context.Context, w http.ResponseWriter, r return resourceInfos, sendTusHeaders, true } +func (p *Handler) getSpaceResourceInfos(ctx context.Context, w http.ResponseWriter, r *http.Request, pf XML, ref *provider.Reference, requestPath string, depth net.Depth, log zerolog.Logger) ([]*provider.ResourceInfo, bool) { + + client, err := p.getClient() + if err != nil { + log.Error().Err(err).Msg("error getting grpc client") + w.WriteHeader(http.StatusInternalServerError) + return nil, false + } + + metadataKeys := metadataKeys(pf) + + // we need to prefix the path with / to make subsequent prefix matches work + //info.Path = filepath.Join("/", spaceRef.Path) + + resourceInfos := []*provider.ResourceInfo{} + + req := &provider.ListContainerRequest{ + Ref: ref, + ArbitraryMetadataKeys: metadataKeys, + } + res, err := client.ListContainer(ctx, req) + if err != nil { + log.Error().Err(err).Msg("error sending list container grpc request") + w.WriteHeader(http.StatusInternalServerError) + return nil, false + } + + if res.Status.Code != rpc.Code_CODE_OK { + log.Debug().Interface("status", res.Status).Msg("List Container not ok, skipping") + return nil, false + } + for _, info := range res.Infos { + info.Path = path.Join(requestPath, info.Path) + } + resourceInfos = append(resourceInfos, res.Infos...) 
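For `Depth: infinity` the loop that follows walks sub-containers iteratively; despite the `stack` naming it pops from the front, so the traversal is breadth-first. A stripped-down sketch of that traversal with the gateway calls abstracted away (`node`, `lister` and `walkInfinity` are illustrative, not from the patch):

```go
package sketch

// node is a minimal stand-in for a ResourceInfo: a path plus a container flag.
type node struct {
	path        string
	isContainer bool
}

// lister abstracts the ListContainer call made for every sub-container; in
// the real handler this is the gateway client.
type lister func(path string) []node

// walkInfinity queues containers FIFO (breadth-first) and prefixes each
// child with its parent path before reporting it, as the handler does.
func walkInfinity(roots []node, list lister) []node {
	results := append([]node{}, roots...)
	queue := append([]node{}, roots...)
	for len(queue) != 0 {
		current := queue[0]
		queue = queue[1:]
		if !current.isContainer {
			continue
		}
		for _, child := range list(current.path) {
			child.path = current.path + "/" + child.path
			results = append(results, child)
			if child.isContainer {
				queue = append(queue, child)
			}
		}
	}
	return results
}
```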
+ + if depth == net.DepthInfinity { + // use a stack to explore sub-containers breadth-first + stack := resourceInfos + for len(stack) != 0 { + info := stack[0] + stack = stack[1:] + + if info.Type != provider.ResourceType_RESOURCE_TYPE_CONTAINER /*|| space.SpaceType == "virtual"*/ { + continue + } + req := &provider.ListContainerRequest{ + Ref: &provider.Reference{ + ResourceId: info.Id, + Path: ".", + }, + ArbitraryMetadataKeys: metadataKeys, + } + res, err := client.ListContainer(ctx, req) // FIXME public link depth infinity -> "gateway: could not find provider: gateway: error calling ListStorageProviders: rpc error: code = PermissionDenied desc = auth: core access token is invalid" + if err != nil { + log.Error().Err(err).Interface("info", info).Msg("error sending list container grpc request") + w.WriteHeader(http.StatusInternalServerError) + return nil, false + } + if res.Status.Code != rpc.Code_CODE_OK { + log.Debug().Interface("status", res.Status).Msg("List Container not ok, skipping") + continue + } + + // check sub-containers in reverse order and add them to the stack + // the reversed order here will produce a more logical sorting of results + for i := len(res.Infos) - 1; i >= 0; i-- { + // add path to resource + res.Infos[i].Path = filepath.Join(info.Path, res.Infos[i].Path) + res.Infos[i].Path = utils.MakeRelativePath(res.Infos[i].Path) + if res.Infos[i].Type == provider.ResourceType_RESOURCE_TYPE_CONTAINER { + stack = append(stack, res.Infos[i]) + } + } + + resourceInfos = append(resourceInfos, res.Infos...) + // TODO: stream response to avoid storing too many results in memory + // we can do that after having stated the root. + } + } + + return resourceInfos, true +} + +func metadataKeys(pf XML) []string { + + var metadataKeys []string + + if pf.Allprop != nil { + // TODO this changes the behavior and returns all properties if allprops has been set, + // but allprops should only return some default properties + // see https://tools.ietf.org/html/rfc4918#section-9.1 + // the description of arbitrary_metadata_keys in https://cs3org.github.io/cs3apis/#cs3.storage.provider.v1beta1.ListContainerRequest an others may need clarification + // tracked in https://github.com/cs3org/cs3apis/issues/104 + metadataKeys = append(metadataKeys, "*") + } else { + metadataKeys = make([]string, 0, len(pf.Prop)) + for i := range pf.Prop { + if requiresExplicitFetching(&pf.Prop[i]) { + metadataKeys = append(metadataKeys, metadataKeyOf(&pf.Prop[i])) + } + } + } + return metadataKeys +} + func addChild(childInfos map[string]*provider.ResourceInfo, spaceInfo *provider.ResourceInfo, requestPath string, @@ -687,10 +813,10 @@ func ReadPropfind(r io.Reader) (pf XML, status int, err error) { } // MultistatusResponse converts a list of resource infos into a multistatus response string -func MultistatusResponse(ctx context.Context, pf *XML, mds []*provider.ResourceInfo, publicURL, ns /*, spaceType*/ string, linkshares map[string]struct{}) ([]byte, error) { +func MultistatusResponse(ctx context.Context, pf *XML, mds []*provider.ResourceInfo, publicURL, ns string, linkshares map[string]struct{}) ([]byte, error) { responses := make([]*ResponseXML, 0, len(mds)) for i := range mds { - res, err := mdToPropResponse(ctx, pf, mds[i], publicURL, ns /*spaceType,*/, linkshares) + res, err := mdToPropResponse(ctx, pf, mds[i], publicURL, ns, linkshares) if err != nil { return nil, err } @@ -709,7 +835,7 @@ func MultistatusResponse(ctx context.Context, pf *XML, mds []*provider.ResourceI // mdToPropResponse converts the 
CS3 metadata into a webdav PropResponse // ns is the CS3 namespace that needs to be removed from the CS3 path before // prefixing it with the baseURI -func mdToPropResponse(ctx context.Context, pf *XML, md *provider.ResourceInfo, publicURL, ns /*, spaceType*/ string, linkshares map[string]struct{}) (*ResponseXML, error) { +func mdToPropResponse(ctx context.Context, pf *XML, md *provider.ResourceInfo, publicURL, ns string, linkshares map[string]struct{}) (*ResponseXML, error) { sublog := appctx.GetLogger(ctx).With().Interface("md", md).Str("ns", ns).Logger() md.Path = strings.TrimPrefix(md.Path, ns) @@ -757,7 +883,9 @@ func mdToPropResponse(ctx context.Context, pf *XML, md *provider.ResourceInfo, p role := conversions.RoleFromResourcePermissions(md.PermissionSet) - isShared := /*spaceType != _spaceTypeProject &&*/ !net.IsCurrentUserOwner(ctx, md.Owner) + // TODO we need a better way to determine if the webdav permissions should list S + // afaict when the storage space root info of the resource is a grant ... and not an actual space root. + isShared := md.Space != nil && md.Space.SpaceType != _spaceTypeProject && !net.IsCurrentUserOwner(ctx, md.Owner) var wdp string isPublic := ls != nil if md.PermissionSet != nil { diff --git a/internal/http/services/owncloud/ocdav/spacelookup/spacelookup.go b/internal/http/services/owncloud/ocdav/spacelookup/spacelookup.go index c7cbc7eb783..43d03e6c4dd 100644 --- a/internal/http/services/owncloud/ocdav/spacelookup/spacelookup.go +++ b/internal/http/services/owncloud/ocdav/spacelookup/spacelookup.go @@ -31,6 +31,7 @@ import ( "github.com/cs3org/reva/v2/pkg/rgrpc/status" "github.com/cs3org/reva/v2/pkg/storagespace" "github.com/cs3org/reva/v2/pkg/utils" + "google.golang.org/protobuf/types/known/fieldmaskpb" ) // LookupReferenceForPath returns: @@ -100,10 +101,16 @@ func LookUpStorageSpacesForPathWithChildren(ctx context.Context, client gateway. lSSReq := &storageProvider.ListStorageSpacesRequest{ Opaque: &typesv1beta1.Opaque{ Map: map[string]*typesv1beta1.OpaqueEntry{ - "path": {Decoder: "plain", Value: []byte(path)}, - "withChildMounts": {Decoder: "plain", Value: []byte("true")}, + // TODO encode requested metadata as json + //"metadata": {Decoder: "json", Value: []byte("*")}, }}, + // get all fields, including root_info + FieldMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}}, } + // list all providers at or below the given path + lSSReq.Opaque = utils.AppendPlainToOpaque(lSSReq.Opaque, "path", path) + // we want to get all metadata? really? when looking up the space roots we actually only want etag, mtime and type so we can construct a child ... + lSSReq.Opaque = utils.AppendPlainToOpaque(lSSReq.Opaque, "metadata", "*") lSSRes, err := client.ListStorageSpaces(ctx, lSSReq) if err != nil {
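LookUpStorageSpacesForPathWithChildren now asks the gateway to populate every space field, including `root_info`, via a field mask, while the path filter and the metadata selector still travel in the opaque map. A small sketch of that request shape, assuming the same `utils.AppendPlainToOpaque` helper used elsewhere in the patch; the wildcard metadata selector is the temporary choice flagged by the TODO (only etag, mtime and type are really needed for the child entries):

```go
package sketch

import (
	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"

	"github.com/cs3org/reva/v2/pkg/utils"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

// listSpacesRequest builds the request used above: "*" asks the provider to
// fill in all fields, including root_info, so callers can skip a separate
// stat of each space root.
func listSpacesRequest(path string) *provider.ListStorageSpacesRequest {
	req := &provider.ListStorageSpacesRequest{
		FieldMask: &fieldmaskpb.FieldMask{Paths: []string{"*"}},
	}
	// list all providers at or below the given path
	req.Opaque = utils.AppendPlainToOpaque(req.Opaque, "path", path)
	// wildcard metadata selection, still opaque-encoded for now
	req.Opaque = utils.AppendPlainToOpaque(req.Opaque, "metadata", "*")
	return req
}
```

With `root_info` populated, getResourceInfos only falls back to a per-space Stat when a provider did not fill it in, which is the fallback added in the propfind.go hunk above.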