From b2316c412b67e59bc89bee3915291327e686f170 Mon Sep 17 00:00:00 2001 From: Willy Kloucek Date: Tue, 1 Feb 2022 10:38:38 +0100 Subject: [PATCH 1/2] remove owncloud storage driver --- .../change-remove-owncloud-storage-driver.md | 10 + pkg/storage/fs/loader/loader.go | 1 - pkg/storage/fs/owncloud/owncloud.go | 2462 ----------------- pkg/storage/fs/owncloud/owncloud_unix.go | 84 - pkg/storage/fs/owncloud/owncloud_windows.go | 76 - pkg/storage/fs/owncloud/upload.go | 556 ---- 6 files changed, 10 insertions(+), 3179 deletions(-) create mode 100644 changelog/unreleased/change-remove-owncloud-storage-driver.md delete mode 100644 pkg/storage/fs/owncloud/owncloud.go delete mode 100755 pkg/storage/fs/owncloud/owncloud_unix.go delete mode 100644 pkg/storage/fs/owncloud/owncloud_windows.go delete mode 100644 pkg/storage/fs/owncloud/upload.go diff --git a/changelog/unreleased/change-remove-owncloud-storage-driver.md b/changelog/unreleased/change-remove-owncloud-storage-driver.md new file mode 100644 index 0000000000..ac54b5733f --- /dev/null +++ b/changelog/unreleased/change-remove-owncloud-storage-driver.md @@ -0,0 +1,10 @@ +Change: remove the ownCloud storage driver + +We've removed the ownCloud storage driver because it was no longer +maintained after the ownCloud SQL storage driver was added. + +If you have been using the ownCloud storage driver, please switch +to the ownCloud SQL storage driver which brings you more features and +is under active maintenance. + +https://github.com/cs3org/reva/pull/2495 diff --git a/pkg/storage/fs/loader/loader.go b/pkg/storage/fs/loader/loader.go index cd88c5ddc7..977c27532f 100644 --- a/pkg/storage/fs/loader/loader.go +++ b/pkg/storage/fs/loader/loader.go @@ -29,7 +29,6 @@ import ( _ "github.com/cs3org/reva/pkg/storage/fs/localhome" _ "github.com/cs3org/reva/pkg/storage/fs/nextcloud" _ "github.com/cs3org/reva/pkg/storage/fs/ocis" - _ "github.com/cs3org/reva/pkg/storage/fs/owncloud" _ "github.com/cs3org/reva/pkg/storage/fs/owncloudsql" _ "github.com/cs3org/reva/pkg/storage/fs/s3" _ "github.com/cs3org/reva/pkg/storage/fs/s3ng" diff --git a/pkg/storage/fs/owncloud/owncloud.go b/pkg/storage/fs/owncloud/owncloud.go deleted file mode 100644 index 82ee727b85..0000000000 --- a/pkg/storage/fs/owncloud/owncloud.go +++ /dev/null @@ -1,2462 +0,0 @@ -// Copyright 2018-2021 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package owncloud - -import ( - "context" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "net/url" - "os" - "path/filepath" - "strconv" - "strings" - "syscall" - "time" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" - "github.com/cs3org/reva/internal/grpc/services/storageprovider" - "github.com/cs3org/reva/pkg/appctx" - ctxpkg "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/logger" - "github.com/cs3org/reva/pkg/mime" - "github.com/cs3org/reva/pkg/rgrpc/todo/pool" - "github.com/cs3org/reva/pkg/sharedconf" - "github.com/cs3org/reva/pkg/storage" - "github.com/cs3org/reva/pkg/storage/fs/registry" - "github.com/cs3org/reva/pkg/storage/utils/ace" - "github.com/cs3org/reva/pkg/storage/utils/chunking" - "github.com/cs3org/reva/pkg/storage/utils/templates" - "github.com/gomodule/redigo/redis" - "github.com/google/uuid" - "github.com/mitchellh/mapstructure" - "github.com/pkg/errors" - "github.com/pkg/xattr" -) - -const ( - // Currently,extended file attributes have four separated - // namespaces (user, trusted, security and system) followed by a dot. - // A non root user can only manipulate the user. namespace, which is what - // we will use to store ownCloud specific metadata. To prevent name - // collisions with other apps We are going to introduce a sub namespace - // "user.oc." - ocPrefix string = "user.oc." - - // idAttribute is the name of the filesystem extended attribute that is used to store the uuid in - idAttribute string = ocPrefix + "id" - - // SharePrefix is the prefix for sharing related extended attributes - sharePrefix string = ocPrefix + "grant." // grants are similar to acls, but they are not propagated down the tree when being changed - trashOriginPrefix string = ocPrefix + "o" - mdPrefix string = ocPrefix + "md." // arbitrary metadata - favPrefix string = ocPrefix + "fav." // favorite flag, per user - etagPrefix string = ocPrefix + "etag." // allow overriding a calculated etag with one from the extended attributes - checksumPrefix string = ocPrefix + "cs." 
- checksumsKey string = "http://owncloud.org/ns/checksums" - favoriteKey string = "http://owncloud.org/ns/favorite" - - spaceTypeAny = "*" -) - -var defaultPermissions *provider.ResourcePermissions = &provider.ResourcePermissions{ - // no permissions -} -var ownerPermissions *provider.ResourcePermissions = &provider.ResourcePermissions{ - // all permissions - AddGrant: true, - CreateContainer: true, - Delete: true, - GetPath: true, - GetQuota: true, - InitiateFileDownload: true, - InitiateFileUpload: true, - ListContainer: true, - ListFileVersions: true, - ListGrants: true, - ListRecycle: true, - Move: true, - PurgeRecycle: true, - RemoveGrant: true, - RestoreFileVersion: true, - RestoreRecycleItem: true, - Stat: true, - UpdateGrant: true, -} - -func init() { - registry.Register("owncloud", New) -} - -type config struct { - DataDirectory string `mapstructure:"datadirectory"` - UploadInfoDir string `mapstructure:"upload_info_dir"` - DeprecatedShareDirectory string `mapstructure:"sharedirectory"` - ShareFolder string `mapstructure:"share_folder"` - UserLayout string `mapstructure:"user_layout"` - Redis string `mapstructure:"redis"` - EnableHome bool `mapstructure:"enable_home"` - Scan bool `mapstructure:"scan"` - UserProviderEndpoint string `mapstructure:"userprovidersvc"` -} - -func parseConfig(m map[string]interface{}) (*config, error) { - c := &config{} - if err := mapstructure.Decode(m, c); err != nil { - err = errors.Wrap(err, "error decoding conf") - return nil, err - } - return c, nil -} - -func (c *config) init(m map[string]interface{}) { - if c.Redis == "" { - c.Redis = ":6379" - } - if c.UserLayout == "" { - c.UserLayout = "{{.Id.OpaqueId}}" - } - if c.UploadInfoDir == "" { - c.UploadInfoDir = "/var/tmp/reva/uploadinfo" - } - // fallback for old config - if c.DeprecatedShareDirectory != "" { - c.ShareFolder = c.DeprecatedShareDirectory - } - if c.ShareFolder == "" { - c.ShareFolder = "/Shares" - } - // ensure share folder always starts with slash - c.ShareFolder = filepath.Join("/", c.ShareFolder) - - // default to scanning if not configured - if _, ok := m["scan"]; !ok { - c.Scan = true - } - c.UserProviderEndpoint = sharedconf.GetGatewaySVC(c.UserProviderEndpoint) -} - -// New returns an implementation to of the storage.FS interface that talk to -// a local filesystem. -func New(m map[string]interface{}) (storage.FS, error) { - c, err := parseConfig(m) - if err != nil { - return nil, err - } - c.init(m) - - // c.DataDirectory should never end in / unless it is the root? - c.DataDirectory = filepath.Clean(c.DataDirectory) - - // create datadir if it does not exist - err = os.MkdirAll(c.DataDirectory, 0700) - if err != nil { - logger.New().Error().Err(err). - Str("path", c.DataDirectory). - Msg("could not create datadir") - } - - err = os.MkdirAll(c.UploadInfoDir, 0700) - if err != nil { - logger.New().Error().Err(err). - Str("path", c.UploadInfoDir). 
- Msg("could not create uploadinfo dir") - } - - pool := &redis.Pool{ - - MaxIdle: 3, - IdleTimeout: 240 * time.Second, - - Dial: func() (redis.Conn, error) { - c, err := redis.Dial("tcp", c.Redis) - if err != nil { - return nil, err - } - return c, err - }, - - TestOnBorrow: func(c redis.Conn, t time.Time) error { - _, err := c.Do("PING") - return err - }, - } - - return &ocfs{ - c: c, - pool: pool, - chunkHandler: chunking.NewChunkHandler(c.UploadInfoDir), - }, nil -} - -type ocfs struct { - c *config - pool *redis.Pool - chunkHandler *chunking.ChunkHandler -} - -func (fs *ocfs) Shutdown(ctx context.Context) error { - return fs.pool.Close() -} - -// scan files and add uuid to path mapping to kv store -func (fs *ocfs) scanFiles(ctx context.Context, conn redis.Conn) { - if fs.c.Scan { - fs.c.Scan = false // TODO ... in progress use mutex ? - log := appctx.GetLogger(ctx) - log.Debug().Str("path", fs.c.DataDirectory).Msg("scanning data directory") - err := filepath.Walk(fs.c.DataDirectory, func(path string, info os.FileInfo, err error) error { - if err != nil { - log.Error().Str("path", path).Err(err).Msg("error accessing path") - return filepath.SkipDir - } - // TODO(jfd) skip versions folder only if direct in users home dir - // we need to skip versions, otherwise a lookup by id might resolve to a version - if strings.Contains(path, "files_versions") { - log.Debug().Str("path", path).Err(err).Msg("skipping versions") - return filepath.SkipDir - } - - // reuse connection to store file ids - id := readOrCreateID(context.Background(), path, nil) - _, err = conn.Do("SET", id, path) - if err != nil { - log.Error().Str("path", path).Err(err).Msg("error caching id") - // continue scanning - return nil - } - - log.Debug().Str("path", path).Str("id", id).Msg("scanned path") - return nil - }) - if err != nil { - log.Error().Err(err).Str("path", fs.c.DataDirectory).Msg("error scanning data directory") - } - } -} - -// owncloud stores files in the files subfolder -// the incoming path starts with /, so we need to insert the files subfolder into the path -// and prefix the data directory -// TODO the path handed to a storage provider should not contain the username -func (fs *ocfs) toInternalPath(ctx context.Context, sp string) (ip string) { - if fs.c.EnableHome { - u := ctxpkg.ContextMustGetUser(ctx) - layout := templates.WithUser(u, fs.c.UserLayout) - // The inner filepath.Join prevents the path from breaking out of - // //files/ - ip = filepath.Join(fs.c.DataDirectory, layout, "files", filepath.Join("/", sp)) - } else { - // trim all / - sp = strings.Trim(sp, "/") - // p = "" or - // p = or - // p = /foo/bar.txt - segments := strings.SplitN(sp, "/", 2) - - if len(segments) == 1 && segments[0] == "" { - ip = fs.c.DataDirectory - return - } - - // parts[0] contains the username or userid. - u, err := fs.getUser(ctx, segments[0]) - if err != nil { - // TODO return invalid internal path? 
- return - } - layout := templates.WithUser(u, fs.c.UserLayout) - - if len(segments) == 1 { - // parts = "" - ip = filepath.Join(fs.c.DataDirectory, layout, "files") - } else { - // parts = "", "foo/bar.txt" - ip = filepath.Join(fs.c.DataDirectory, layout, "files", filepath.Join(segments[1])) - } - - } - return -} - -func (fs *ocfs) toInternalShadowPath(ctx context.Context, sp string) (internal string) { - if fs.c.EnableHome { - u := ctxpkg.ContextMustGetUser(ctx) - layout := templates.WithUser(u, fs.c.UserLayout) - internal = filepath.Join(fs.c.DataDirectory, layout, "shadow_files", sp) - } else { - // trim all / - sp = strings.Trim(sp, "/") - // p = "" or - // p = or - // p = /foo/bar.txt - segments := strings.SplitN(sp, "/", 2) - - if len(segments) == 1 && segments[0] == "" { - internal = fs.c.DataDirectory - return - } - - // parts[0] contains the username or userid. - u, err := fs.getUser(ctx, segments[0]) - if err != nil { - // TODO return invalid internal path? - return - } - layout := templates.WithUser(u, fs.c.UserLayout) - - if len(segments) == 1 { - // parts = "" - internal = filepath.Join(fs.c.DataDirectory, layout, "shadow_files") - } else { - // parts = "", "foo/bar.txt" - internal = filepath.Join(fs.c.DataDirectory, layout, "shadow_files", segments[1]) - } - } - return -} - -// ownloud stores versions in the files_versions subfolder -// the incoming path starts with /, so we need to insert the files subfolder into the path -// and prefix the data directory -// TODO the path handed to a storage provider should not contain the username -func (fs *ocfs) getVersionsPath(ctx context.Context, ip string) string { - // ip = /path/to/data//files/foo/bar.txt - // remove data dir - if fs.c.DataDirectory != "/" { - // fs.c.DataDirectory is a clean path, so it never ends in / - ip = strings.TrimPrefix(ip, fs.c.DataDirectory) - } - // ip = //files/foo/bar.txt - parts := strings.SplitN(ip, "/", 4) - - // parts[1] contains the username or userid. - u, err := fs.getUser(ctx, parts[1]) - if err != nil { - // TODO return invalid internal path? - return "" - } - layout := templates.WithUser(u, fs.c.UserLayout) - - switch len(parts) { - case 3: - // parts = "", "" - return filepath.Join(fs.c.DataDirectory, layout, "files_versions") - case 4: - // parts = "", "", "foo/bar.txt" - return filepath.Join(fs.c.DataDirectory, layout, "files_versions", filepath.Join("/", parts[3])) - default: - return "" // TODO Must not happen? 
- } - -} - -// owncloud stores trashed items in the files_trashbin subfolder of a users home -func (fs *ocfs) getRecyclePath(ctx context.Context) (string, error) { - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - err := errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx") - return "", err - } - layout := templates.WithUser(u, fs.c.UserLayout) - return filepath.Join(fs.c.DataDirectory, layout, "files_trashbin/files"), nil -} - -func (fs *ocfs) getVersionRecyclePath(ctx context.Context) (string, error) { - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - err := errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx") - return "", err - } - layout := templates.WithUser(u, fs.c.UserLayout) - return filepath.Join(fs.c.DataDirectory, layout, "files_trashbin/files_versions"), nil -} - -func (fs *ocfs) toStoragePath(ctx context.Context, ip string) (sp string) { - if fs.c.EnableHome { - u := ctxpkg.ContextMustGetUser(ctx) - layout := templates.WithUser(u, fs.c.UserLayout) - trim := filepath.Join(fs.c.DataDirectory, layout, "files") - sp = strings.TrimPrefix(ip, trim) - // root directory - if sp == "" { - sp = "/" - } - } else { - // ip = /data//files/foo/bar.txt - // remove data dir - if fs.c.DataDirectory != "/" { - // fs.c.DataDirectory is a clean path, so it never ends in / - ip = strings.TrimPrefix(ip, fs.c.DataDirectory) - // ip = //files/foo/bar.txt - } - - segments := strings.SplitN(ip, "/", 4) - // parts = "", "", "files", "foo/bar.txt" - switch len(segments) { - case 1: - sp = "/" - case 2: - sp = filepath.Join("/", segments[1]) - case 3: - sp = filepath.Join("/", segments[1]) - default: - sp = filepath.Join("/", segments[1], segments[3]) - } - } - log := appctx.GetLogger(ctx) - log.Debug().Str("driver", "ocfs").Str("ipath", ip).Str("spath", sp).Msg("toStoragePath") - return -} - -func (fs *ocfs) toStorageShadowPath(ctx context.Context, ip string) (sp string) { - if fs.c.EnableHome { - u := ctxpkg.ContextMustGetUser(ctx) - layout := templates.WithUser(u, fs.c.UserLayout) - trim := filepath.Join(fs.c.DataDirectory, layout, "shadow_files") - sp = strings.TrimPrefix(ip, trim) - } else { - // ip = /data//shadow_files/foo/bar.txt - // remove data dir - if fs.c.DataDirectory != "/" { - // fs.c.DataDirectory is a clean path, so it never ends in / - ip = strings.TrimPrefix(ip, fs.c.DataDirectory) - // ip = //shadow_files/foo/bar.txt - } - - segments := strings.SplitN(ip, "/", 4) - // parts = "", "", "shadow_files", "foo/bar.txt" - switch len(segments) { - case 1: - sp = "/" - case 2: - sp = filepath.Join("/", segments[1]) - case 3: - sp = filepath.Join("/", segments[1]) - default: - sp = filepath.Join("/", segments[1], segments[3]) - } - } - appctx.GetLogger(ctx).Debug().Str("driver", "ocfs").Str("ipath", ip).Str("spath", sp).Msg("toStorageShadowPath") - return -} - -// TODO the owner needs to come from a different place -func (fs *ocfs) getOwner(ip string) string { - ip = strings.TrimPrefix(ip, fs.c.DataDirectory) - parts := strings.SplitN(ip, "/", 3) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// TODO cache user lookup -func (fs *ocfs) getUser(ctx context.Context, usernameOrID string) (id *userpb.User, err error) { - u := ctxpkg.ContextMustGetUser(ctx) - // check if username matches and id is set - if u.Username == usernameOrID && u.Id != nil && u.Id.OpaqueId != "" { - return u, nil - } - // check if userid matches and username is set - if u.Id != nil && u.Id.OpaqueId == usernameOrID && u.Username != "" { - return u, nil - 
} - // look up at the userprovider - - // parts[0] contains the username or userid. use user service to look up id - c, err := pool.GetUserProviderServiceClient(fs.c.UserProviderEndpoint) - if err != nil { - appctx.GetLogger(ctx). - Error().Err(err). - Str("userprovidersvc", fs.c.UserProviderEndpoint). - Str("usernameOrID", usernameOrID). - Msg("could not get user provider client") - return nil, err - } - res, err := c.GetUser(ctx, &userpb.GetUserRequest{ - UserId: &userpb.UserId{OpaqueId: usernameOrID}, - }) - if err != nil { - appctx.GetLogger(ctx). - Error().Err(err). - Str("userprovidersvc", fs.c.UserProviderEndpoint). - Str("usernameOrID", usernameOrID). - Msg("could not get user") - return nil, err - } - - if res.Status.Code == rpc.Code_CODE_NOT_FOUND { - appctx.GetLogger(ctx). - Error(). - Str("userprovidersvc", fs.c.UserProviderEndpoint). - Str("usernameOrID", usernameOrID). - Interface("status", res.Status). - Msg("user not found") - return nil, fmt.Errorf("user not found") - } - - if res.Status.Code != rpc.Code_CODE_OK { - appctx.GetLogger(ctx). - Error(). - Str("userprovidersvc", fs.c.UserProviderEndpoint). - Str("usernameOrID", usernameOrID). - Interface("status", res.Status). - Msg("user lookup failed") - return nil, fmt.Errorf("user lookup failed") - } - return res.User, nil -} - -// permissionSet returns the permission set for the current user -func (fs *ocfs) permissionSet(ctx context.Context, owner *userpb.UserId) *provider.ResourcePermissions { - if owner == nil { - return &provider.ResourcePermissions{ - Stat: true, - } - } - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - return &provider.ResourcePermissions{ - // no permissions - } - } - if u.Id == nil { - return &provider.ResourcePermissions{ - // no permissions - } - } - if u.Id.OpaqueId == owner.OpaqueId && u.Id.Idp == owner.Idp { - return &provider.ResourcePermissions{ - // owner has all permissions - AddGrant: true, - CreateContainer: true, - Delete: true, - GetPath: true, - GetQuota: true, - InitiateFileDownload: true, - InitiateFileUpload: true, - ListContainer: true, - ListFileVersions: true, - ListGrants: true, - ListRecycle: true, - Move: true, - PurgeRecycle: true, - RemoveGrant: true, - RestoreFileVersion: true, - RestoreRecycleItem: true, - Stat: true, - UpdateGrant: true, - } - } - // TODO fix permissions for share recipients by traversing reading acls up to the root? cache acls for the parent node and reuse it - return &provider.ResourcePermissions{ - AddGrant: true, - CreateContainer: true, - Delete: true, - GetPath: true, - GetQuota: true, - InitiateFileDownload: true, - InitiateFileUpload: true, - ListContainer: true, - ListFileVersions: true, - ListGrants: true, - ListRecycle: true, - Move: true, - PurgeRecycle: true, - RemoveGrant: true, - RestoreFileVersion: true, - RestoreRecycleItem: true, - Stat: true, - UpdateGrant: true, - } -} -func (fs *ocfs) convertToResourceInfo(ctx context.Context, fi os.FileInfo, ip string, sp string, c redis.Conn, mdKeys []string) *provider.ResourceInfo { - id := readOrCreateID(ctx, ip, c) - - etag := calcEtag(ctx, fi) - - if val, err := xattr.Get(ip, etagPrefix+etag); err == nil { - appctx.GetLogger(ctx).Debug(). - Str("ipath", ip). - Str("calcetag", etag). - Str("etag", string(val)). 
- Msg("overriding calculated etag") - etag = string(val) - } - - mdKeysMap := make(map[string]struct{}) - for _, k := range mdKeys { - mdKeysMap[k] = struct{}{} - } - - var returnAllKeys bool - if _, ok := mdKeysMap["*"]; len(mdKeys) == 0 || ok { - returnAllKeys = true - } - - metadata := map[string]string{} - - if _, ok := mdKeysMap[favoriteKey]; returnAllKeys || ok { - favorite := "" - if u, ok := ctxpkg.ContextGetUser(ctx); ok { - // the favorite flag is specific to the user, so we need to incorporate the userid - if uid := u.GetId(); uid != nil { - fa := fmt.Sprintf("%s%s@%s", favPrefix, uid.GetOpaqueId(), uid.GetIdp()) - if val, err := xattr.Get(ip, fa); err == nil { - appctx.GetLogger(ctx).Debug(). - Str("ipath", ip). - Str("favorite", string(val)). - Str("username", u.GetUsername()). - Msg("found favorite flag") - favorite = string(val) - } - } else { - appctx.GetLogger(ctx).Error().Err(errtypes.UserRequired("userrequired")).Msg("user has no id") - } - } else { - appctx.GetLogger(ctx).Error().Err(errtypes.UserRequired("userrequired")).Msg("error getting user from ctx") - } - metadata[favoriteKey] = favorite - } - - list, err := xattr.List(ip) - if err == nil { - for _, entry := range list { - // filter out non-custom properties - if !strings.HasPrefix(entry, mdPrefix) { - continue - } - if val, err := xattr.Get(ip, entry); err == nil { - k := entry[len(mdPrefix):] - if _, ok := mdKeysMap[k]; returnAllKeys || ok { - metadata[k] = string(val) - } - } else { - appctx.GetLogger(ctx).Error().Err(err). - Str("entry", entry). - Msg("error retrieving xattr metadata") - } - } - } else { - appctx.GetLogger(ctx).Error().Err(err).Msg("error getting list of extended attributes") - } - - ri := &provider.ResourceInfo{ - Id: &provider.ResourceId{OpaqueId: id}, - Path: sp, - Type: getResourceType(fi.IsDir()), - Etag: etag, - MimeType: mime.Detect(fi.IsDir(), ip), - Size: uint64(fi.Size()), - Mtime: &types.Timestamp{ - Seconds: uint64(fi.ModTime().Unix()), - // TODO read nanos from where? Nanos: fi.MTimeNanos, - }, - ArbitraryMetadata: &provider.ArbitraryMetadata{ - Metadata: metadata, - }, - } - - if owner, err := fs.getUser(ctx, fs.getOwner(ip)); err == nil { - ri.Owner = owner.Id - } else { - appctx.GetLogger(ctx).Error().Err(err).Msg("error getting owner") - } - - ri.PermissionSet = fs.permissionSet(ctx, ri.Owner) - - // checksums - if !fi.IsDir() { - if _, checksumRequested := mdKeysMap[checksumsKey]; returnAllKeys || checksumRequested { - // TODO which checksum was requested? sha1 adler32 or md5? for now hardcode sha1? 
- readChecksumIntoResourceChecksum(ctx, ip, storageprovider.XSSHA1, ri) - readChecksumIntoOpaque(ctx, ip, storageprovider.XSMD5, ri) - readChecksumIntoOpaque(ctx, ip, storageprovider.XSAdler32, ri) - } - } - - return ri -} -func getResourceType(isDir bool) provider.ResourceType { - if isDir { - return provider.ResourceType_RESOURCE_TYPE_CONTAINER - } - return provider.ResourceType_RESOURCE_TYPE_FILE -} - -// CreateStorageSpace creates a storage space -func (fs *ocfs) CreateStorageSpace(ctx context.Context, req *provider.CreateStorageSpaceRequest) (*provider.CreateStorageSpaceResponse, error) { - if req.Type != "personal" { - return nil, errtypes.NotSupported("only personal spaces supported") - } - - layout := templates.WithUser(req.Owner, fs.c.UserLayout) - - homePaths := []string{ - filepath.Join(fs.c.DataDirectory, layout, "files"), - filepath.Join(fs.c.DataDirectory, layout, "files_trashbin"), - filepath.Join(fs.c.DataDirectory, layout, "files_versions"), - filepath.Join(fs.c.DataDirectory, layout, "uploads"), - filepath.Join(fs.c.DataDirectory, layout, "shadow_files"), - } - - for _, v := range homePaths { - if err := os.MkdirAll(v, 0700); err != nil { - return nil, errors.Wrap(err, "ocfs: error creating home path: "+v) - } - } - - return &provider.CreateStorageSpaceResponse{ - Status: &rpc.Status{Code: rpc.Code_CODE_OK}, - }, nil -} - -func readOrCreateID(ctx context.Context, ip string, conn redis.Conn) string { - log := appctx.GetLogger(ctx) - - // read extended file attribute for id - // generate if not present - var id []byte - var err error - if id, err = xattr.Get(ip, idAttribute); err != nil { - log.Warn().Err(err).Str("driver", "owncloud").Str("ipath", ip).Msg("error reading file id") - - uuid := uuid.New() - // store uuid - id = uuid[:] - if err := xattr.Set(ip, idAttribute, id); err != nil { - log.Error().Err(err).Str("driver", "owncloud").Str("ipath", ip).Msg("error storing file id") - } - // TODO cache path for uuid in redis - // TODO reuse conn? 
- if conn != nil { - _, err := conn.Do("SET", uuid.String(), ip) - if err != nil { - log.Error().Err(err).Str("driver", "owncloud").Str("ipath", ip).Msg("error caching id") - // continue - } - } - } - // todo sign metadata - var uid uuid.UUID - if uid, err = uuid.FromBytes(id); err != nil { - log.Error().Err(err).Msg("error parsing uuid") - return "" - } - return uid.String() -} - -func (fs *ocfs) getPath(ctx context.Context, id *provider.ResourceId) (string, error) { - log := appctx.GetLogger(ctx) - c := fs.pool.Get() - defer c.Close() - fs.scanFiles(ctx, c) - ip, err := redis.String(c.Do("GET", id.OpaqueId)) - if err != nil { - return "", errtypes.NotFound(id.OpaqueId) - } - - idFromXattr, err := xattr.Get(ip, idAttribute) - if err != nil { - return "", errtypes.NotFound(id.OpaqueId) - } - - uid, err := uuid.FromBytes(idFromXattr) - if err != nil { - log.Error().Err(err).Msg("error parsing uuid") - } - - if uid.String() != id.OpaqueId { - if _, err := c.Do("DEL", id.OpaqueId); err != nil { - return "", err - } - return "", errtypes.NotFound(id.OpaqueId) - } - - return ip, nil -} - -// GetPathByID returns the storage relative path for the file id, without the internal namespace -func (fs *ocfs) GetPathByID(ctx context.Context, id *provider.ResourceId) (string, error) { - ip, err := fs.getPath(ctx, id) - if err != nil { - return "", err - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.GetPath { - return "", errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return "", errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return "", errors.Wrap(err, "ocfs: error reading permissions") - } - - return fs.toStoragePath(ctx, ip), nil -} - -// resolve takes in a request path or request id and converts it to an internal path. 
-func (fs *ocfs) resolve(ctx context.Context, ref *provider.Reference) (string, error) { - - // if storage id is set look up that - if ref.ResourceId != nil { - ip, err := fs.getPath(ctx, ref.ResourceId) - if err != nil { - return "", err - } - return filepath.Join("/", ip, filepath.Join("/", ref.Path)), nil - } - - // use a path - return fs.toInternalPath(ctx, ref.Path), nil - -} - -func (fs *ocfs) DenyGrant(ctx context.Context, ref *provider.Reference, g *provider.Grantee) error { - return errtypes.NotSupported("ocfs: deny grant not supported") -} - -func (fs *ocfs) AddGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error { - ip, err := fs.resolve(ctx, ref) - if err != nil { - return errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.AddGrant { - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return errors.Wrap(err, "ocfs: error reading permissions") - } - - e := ace.FromGrant(g) - principal, value := e.Marshal() - if err := xattr.Set(ip, sharePrefix+principal, value); err != nil { - return err - } - return fs.propagate(ctx, ip) -} - -// extractACEsFromAttrs reads ACEs in the list of attrs from the file -func extractACEsFromAttrs(ctx context.Context, ip string, attrs []string) (entries []*ace.ACE) { - log := appctx.GetLogger(ctx) - entries = []*ace.ACE{} - for i := range attrs { - if strings.HasPrefix(attrs[i], sharePrefix) { - var value []byte - var err error - if value, err = xattr.Get(ip, attrs[i]); err != nil { - log.Error().Err(err).Str("attr", attrs[i]).Msg("could not read attribute") - continue - } - var e *ace.ACE - principal := attrs[i][len(sharePrefix):] - if e, err = ace.Unmarshal(principal, value); err != nil { - log.Error().Err(err).Str("principal", principal).Str("attr", attrs[i]).Msg("could not unmarshal ace") - continue - } - entries = append(entries, e) - } - } - return -} - -// TODO if user is owner but no acls found he can do everything? -// The owncloud driver does not integrate with the os so, for now, the owner can do everything, see ownerPermissions. -// Should this change we can store an acl for the owner in every node. -// We could also add default acls that can only the admin can set, eg for a read only storage? -// Someone needs to write to provide the content that should be read only, so this would likely be an acl for a group anyway. -// We need the storage relative path so we can calculate the permissions -// for the node based on all acls in the tree up to the root -func (fs *ocfs) readPermissions(ctx context.Context, ip string) (p *provider.ResourcePermissions, err error) { - - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - appctx.GetLogger(ctx).Debug().Str("ipath", ip).Msg("no user in context, returning default permissions") - return defaultPermissions, nil - } - // check if the current user is the owner - if fs.getOwner(ip) == u.Id.OpaqueId { - appctx.GetLogger(ctx).Debug().Str("ipath", ip).Msg("user is owner, returning owner permissions") - return ownerPermissions, nil - } - - // for non owners this is a little more complicated: - aggregatedPermissions := &provider.ResourcePermissions{} - // add default permissions - addPermissions(aggregatedPermissions, defaultPermissions) - - // determine root - rp := fs.toInternalPath(ctx, "") - // TODO rp will be the datadir ... be we don't want to go up that high. 
The users home is far enough - np := ip - - if ip == rp { - return &provider.ResourcePermissions{ - // grant read access to the root - GetPath: true, - GetQuota: true, - ListContainer: true, - ListFileVersions: true, - ListGrants: true, - ListRecycle: true, - Stat: true, - }, nil - } - - // for an efficient group lookup convert the list of groups to a map - // groups are just strings ... groupnames ... or group ids ??? AAARGH !!! - groupsMap := make(map[string]bool, len(u.Groups)) - for i := range u.Groups { - groupsMap[u.Groups[i]] = true - } - - var e *ace.ACE - // for all segments, starting at the leaf - for np != rp { - - var attrs []string - if attrs, err = xattr.List(np); err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("ipath", np).Msg("error listing attributes") - return nil, err - } - - userace := sharePrefix + "u:" + u.Id.OpaqueId - userFound := false - for i := range attrs { - // we only need the find the user once per node - switch { - case !userFound && attrs[i] == userace: - e, err = fs.readACE(ctx, np, "u:"+u.Id.OpaqueId) - case strings.HasPrefix(attrs[i], sharePrefix+"g:"): - g := strings.TrimPrefix(attrs[i], sharePrefix+"g:") - if groupsMap[g] { - e, err = fs.readACE(ctx, np, "g:"+g) - } else { - // no need to check attribute - continue - } - default: - // no need to check attribute - continue - } - - switch { - case err == nil: - addPermissions(aggregatedPermissions, e.Grant().GetPermissions()) - appctx.GetLogger(ctx).Debug().Str("ipath", np).Str("principal", strings.TrimPrefix(attrs[i], sharePrefix)).Interface("permissions", aggregatedPermissions).Msg("adding permissions") - case isNoData(err): - err = nil - appctx.GetLogger(ctx).Error().Str("ipath", np).Str("principal", strings.TrimPrefix(attrs[i], sharePrefix)).Interface("attrs", attrs).Msg("no permissions found on node, but they were listed") - default: - appctx.GetLogger(ctx).Error().Err(err).Str("ipath", np).Str("principal", strings.TrimPrefix(attrs[i], sharePrefix)).Msg("error reading permissions") - return nil, err - } - } - - np = filepath.Dir(np) - } - - // 3. read user permissions until one is found? - // what if, when checking /a/b/c/d, /a/b has write permission, but /a/b/c has not? - // those are two shares one read only, and a higher one rw, - // should the higher one be used? - // or, since we did find a matching ace in a lower node use that because it matches the principal? - // this would allow ai user to share a folder rm but take away the write capability for eg a docs folder inside it. - // 4. read group permissions until all groups of the user are matched? - // same as for user permission, but we need to keep going further up the tree until all groups of the user were matched. - // what if a user has thousands of groups? - // we will always have to walk to the root. - // but the same problem occurs for a user with 2 groups but where only one group was used to share. - // in any case we need to iterate the aces, not the number of groups of the user. - // listing the aces can be used to match the principals, we do not need to fully real all aces - // what if, when checking /a/b/c/d, /a/b has write permission for group g, but /a/b/c has an ace for another group h the user is also a member of? - // it would allow restricting a users permissions by resharing something with him with lower permission? 
- // so if you have reshare permissions you could accidentially restrict users access to a subfolder of a rw share to ro by sharing it to another group as ro when they are part of both groups - // it makes more sense to have explicit negative permissions - - // TODO we need to read all parents ... until we find a matching ace? - appctx.GetLogger(ctx).Debug().Interface("permissions", aggregatedPermissions).Str("ipath", ip).Msg("returning aggregated permissions") - return aggregatedPermissions, nil -} - -func isNoData(err error) bool { - if xerr, ok := err.(*xattr.Error); ok { - if serr, ok2 := xerr.Err.(syscall.Errno); ok2 { - return serr == syscall.ENODATA - } - } - return false -} - -// The os not exists error is buried inside the xattr error, -// so we cannot just use os.IsNotExists(). -func isNotFound(err error) bool { - if xerr, ok := err.(*xattr.Error); ok { - if serr, ok2 := xerr.Err.(syscall.Errno); ok2 { - return serr == syscall.ENOENT - } - } - return false -} - -func (fs *ocfs) readACE(ctx context.Context, ip string, principal string) (e *ace.ACE, err error) { - var b []byte - if b, err = xattr.Get(ip, sharePrefix+principal); err != nil { - return nil, err - } - if e, err = ace.Unmarshal(principal, b); err != nil { - return nil, err - } - return -} - -// additive merging of permissions only -func addPermissions(p1 *provider.ResourcePermissions, p2 *provider.ResourcePermissions) { - p1.AddGrant = p1.AddGrant || p2.AddGrant - p1.CreateContainer = p1.CreateContainer || p2.CreateContainer - p1.Delete = p1.Delete || p2.Delete - p1.GetPath = p1.GetPath || p2.GetPath - p1.GetQuota = p1.GetQuota || p2.GetQuota - p1.InitiateFileDownload = p1.InitiateFileDownload || p2.InitiateFileDownload - p1.InitiateFileUpload = p1.InitiateFileUpload || p2.InitiateFileUpload - p1.ListContainer = p1.ListContainer || p2.ListContainer - p1.ListFileVersions = p1.ListFileVersions || p2.ListFileVersions - p1.ListGrants = p1.ListGrants || p2.ListGrants - p1.ListRecycle = p1.ListRecycle || p2.ListRecycle - p1.Move = p1.Move || p2.Move - p1.PurgeRecycle = p1.PurgeRecycle || p2.PurgeRecycle - p1.RemoveGrant = p1.RemoveGrant || p2.RemoveGrant - p1.RestoreFileVersion = p1.RestoreFileVersion || p2.RestoreFileVersion - p1.RestoreRecycleItem = p1.RestoreRecycleItem || p2.RestoreRecycleItem - p1.Stat = p1.Stat || p2.Stat - p1.UpdateGrant = p1.UpdateGrant || p2.UpdateGrant -} - -func (fs *ocfs) ListGrants(ctx context.Context, ref *provider.Reference) (grants []*provider.Grant, err error) { - log := appctx.GetLogger(ctx) - var ip string - if ip, err = fs.resolve(ctx, ref); err != nil { - return nil, errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListGrants { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return nil, errors.Wrap(err, "ocfs: error reading permissions") - } - - var attrs []string - if attrs, err = xattr.List(ip); err != nil { - // TODO err might be a not exists - log.Error().Err(err).Msg("error listing attributes") - return nil, err - } - - log.Debug().Interface("attrs", attrs).Msg("read attributes") - - aces := extractACEsFromAttrs(ctx, ip, attrs) - - grants = make([]*provider.Grant, 0, len(aces)) - for i := range aces { - grants = append(grants, aces[i].Grant()) - } - - return grants, nil -} - -func (fs *ocfs) RemoveGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (err error) { - - 
var ip string - if ip, err = fs.resolve(ctx, ref); err != nil { - return errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListContainer { - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return errors.Wrap(err, "ocfs: error reading permissions") - } - - var attr string - if g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_GROUP { - attr = sharePrefix + "g:" + g.Grantee.GetGroupId().OpaqueId - } else { - attr = sharePrefix + "u:" + g.Grantee.GetUserId().OpaqueId - } - - if err = xattr.Remove(ip, attr); err != nil { - return - } - - return fs.propagate(ctx, ip) -} - -func (fs *ocfs) UpdateGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error { - ip, err := fs.resolve(ctx, ref) - if err != nil { - return errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.UpdateGrant { - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return errors.Wrap(err, "ocfs: error reading permissions") - } - - e := ace.FromGrant(g) - principal, value := e.Marshal() - if err := xattr.Set(ip, sharePrefix+principal, value); err != nil { - return err - } - return fs.propagate(ctx, ip) -} - -func (fs *ocfs) CreateHome(ctx context.Context) error { - return errtypes.NotSupported("use CreateStorageSpace with type personal") - /* - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - err := errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx") - return err - } - layout := templates.WithUser(u, fs.c.UserLayout) - - homePaths := []string{ - filepath.Join(fs.c.DataDirectory, layout, "files"), - filepath.Join(fs.c.DataDirectory, layout, "files_trashbin"), - filepath.Join(fs.c.DataDirectory, layout, "files_versions"), - filepath.Join(fs.c.DataDirectory, layout, "uploads"), - filepath.Join(fs.c.DataDirectory, layout, "shadow_files"), - } - - for _, v := range homePaths { - if err := os.MkdirAll(v, 0700); err != nil { - return errors.Wrap(err, "ocfs: error creating home path: "+v) - } - } - - return nil - */ -} - -// If home is enabled, the relative home is always the empty string -func (fs *ocfs) GetHome(ctx context.Context) (string, error) { - return "", errtypes.NotSupported("use CreateStorageSpace with type personal") - /* - if !fs.c.EnableHome { - return "", errtypes.NotSupported("ocfs: get home not supported") - } - return "", nil - */ -} - -func (fs *ocfs) CreateDir(ctx context.Context, ref *provider.Reference) (err error) { - - ip, err := fs.resolve(ctx, ref) - if err != nil { - return err - } - - // check permissions of parent dir - if perm, err := fs.readPermissions(ctx, filepath.Dir(ip)); err == nil { - if !perm.CreateContainer { - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(ref.Path) - } - return errors.Wrap(err, "ocfs: error reading permissions") - } - - if err = os.Mkdir(ip, 0700); err != nil { - if os.IsNotExist(err) { - return errtypes.NotFound(ref.Path) - } - // FIXME we also need already exists error, webdav expects 405 MethodNotAllowed - return errors.Wrap(err, "ocfs: error creating dir "+ref.Path) - } - return fs.propagate(ctx, ip) -} - -// TouchFile as defined in the storage.FS interface -func (fs *ocfs) TouchFile(ctx context.Context, 
ref *provider.Reference) error { - return fmt.Errorf("unimplemented: TouchFile") -} - -func (fs *ocfs) isShareFolderChild(sp string) bool { - return strings.HasPrefix(sp, fs.c.ShareFolder) -} - -func (fs *ocfs) isShareFolderRoot(sp string) bool { - return sp == fs.c.ShareFolder -} - -func (fs *ocfs) CreateReference(ctx context.Context, sp string, targetURI *url.URL) error { - if !fs.isShareFolderChild(sp) { - return errtypes.PermissionDenied("ocfs: cannot create references outside the share folder: share_folder=" + "/Shares" + " path=" + sp) - } - - ip := fs.toInternalShadowPath(ctx, sp) - // TODO check permission? - - dir, _ := filepath.Split(ip) - if err := os.MkdirAll(dir, 0700); err != nil { - return errors.Wrapf(err, "ocfs: error creating shadow path %s", dir) - } - - f, err := os.Create(ip) - if err != nil { - return errors.Wrapf(err, "ocfs: error creating shadow file %s", ip) - } - - err = xattr.FSet(f, mdPrefix+"target", []byte(targetURI.String())) - if err != nil { - return errors.Wrapf(err, "ocfs: error setting the target %s on the shadow file %s", targetURI.String(), ip) - } - return nil -} - -func (fs *ocfs) setMtime(ctx context.Context, ip string, mtime string) error { - log := appctx.GetLogger(ctx) - if mt, err := parseMTime(mtime); err == nil { - // updating mtime also updates atime - if err := os.Chtimes(ip, mt, mt); err != nil { - log.Error().Err(err). - Str("ipath", ip). - Time("mtime", mt). - Msg("could not set mtime") - return errors.Wrap(err, "could not set mtime") - } - } else { - log.Error().Err(err). - Str("ipath", ip). - Str("mtime", mtime). - Msg("could not parse mtime") - return errors.Wrap(err, "could not parse mtime") - } - return nil -} -func (fs *ocfs) SetArbitraryMetadata(ctx context.Context, ref *provider.Reference, md *provider.ArbitraryMetadata) (err error) { - log := appctx.GetLogger(ctx) - - var ip string - if ip, err = fs.resolve(ctx, ref); err != nil { - return errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.InitiateFileUpload { // TODO add dedicated permission? - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return errors.Wrap(err, "ocfs: error reading permissions") - } - - var fi os.FileInfo - fi, err = os.Stat(ip) - if err != nil { - if os.IsNotExist(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return errors.Wrap(err, "ocfs: error stating "+ip) - } - - errs := []error{} - - if md.Metadata != nil { - if val, ok := md.Metadata["mtime"]; ok { - err := fs.setMtime(ctx, ip, val) - if err != nil { - errs = append(errs, errors.Wrap(err, "could not set mtime")) - } - // remove from metadata - delete(md.Metadata, "mtime") - } - // TODO(jfd) special handling for atime? - // TODO(jfd) allow setting birth time (btime)? - // TODO(jfd) any other metadata that is interesting? fileid? - if val, ok := md.Metadata["etag"]; ok { - etag := calcEtag(ctx, fi) - val = fmt.Sprintf("\"%s\"", strings.Trim(val, "\"")) - if etag == val { - log.Debug(). - Str("ipath", ip). - Str("etag", val). - Msg("ignoring request to update identical etag") - } else - // etag is only valid until the calculated etag changes - // TODO(jfd) cleanup in a batch job - if err := xattr.Set(ip, etagPrefix+etag, []byte(val)); err != nil { - log.Error().Err(err). - Str("ipath", ip). - Str("calcetag", etag). - Str("etag", val). 
- Msg("could not set etag") - errs = append(errs, errors.Wrap(err, "could not set etag")) - } - delete(md.Metadata, "etag") - } - if val, ok := md.Metadata["http://owncloud.org/ns/favorite"]; ok { - // TODO we should not mess with the user here ... the favorites is now a user specific property for a file - // that cannot be mapped to extended attributes without leaking who has marked a file as a favorite - // it is a specific case of a tag, which is user individual as well - // TODO there are different types of tags - // 1. public that are managed by everyone - // 2. private tags that are only visible to the user - // 3. system tags that are only visible to the system - // 4. group tags that are only visible to a group ... - // urgh ... well this can be solved using different namespaces - // 1. public = p: - // 2. private = u:: for user specific - // 3. system = s: for system - // 4. group = g:: - // 5. app? = a:: for apps? - // obviously this only is secure when the u/s/g/a namespaces are not accessible by users in the filesystem - // public tags can be mapped to extended attributes - if u, ok := ctxpkg.ContextGetUser(ctx); ok { - // the favorite flag is specific to the user, so we need to incorporate the userid - if uid := u.GetId(); uid != nil { - fa := fmt.Sprintf("%s%s@%s", favPrefix, uid.GetOpaqueId(), uid.GetIdp()) - if err := xattr.Set(ip, fa, []byte(val)); err != nil { - log.Error().Err(err). - Str("ipath", ip). - Interface("user", u). - Str("key", fa). - Msg("could not set favorite flag") - errs = append(errs, errors.Wrap(err, "could not set favorite flag")) - } - } else { - log.Error(). - Str("ipath", ip). - Interface("user", u). - Msg("user has no id") - errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "user has no id")) - } - } else { - log.Error(). - Str("ipath", ip). - Interface("user", u). - Msg("error getting user from ctx") - errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx")) - } - // remove from metadata - delete(md.Metadata, "http://owncloud.org/ns/favorite") - } - } - for k, v := range md.Metadata { - if err := xattr.Set(ip, mdPrefix+k, []byte(v)); err != nil { - log.Error().Err(err). - Str("ipath", ip). - Str("key", k). - Str("val", v). - Msg("could not set metadata") - errs = append(errs, errors.Wrap(err, "could not set metadata")) - } - } - switch len(errs) { - case 0: - return fs.propagate(ctx, ip) - case 1: - return errs[0] - default: - // TODO how to return multiple errors? - return errors.New("multiple errors occurred, see log for details") - } -} - -func parseMTime(v string) (t time.Time, err error) { - p := strings.SplitN(v, ".", 2) - var sec, nsec int64 - if sec, err = strconv.ParseInt(p[0], 10, 64); err == nil { - if len(p) > 1 { - nsec, err = strconv.ParseInt(p[1], 10, 64) - } - } - return time.Unix(sec, nsec), err -} - -func (fs *ocfs) UnsetArbitraryMetadata(ctx context.Context, ref *provider.Reference, keys []string) (err error) { - log := appctx.GetLogger(ctx) - - var ip string - if ip, err = fs.resolve(ctx, ref); err != nil { - return errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.InitiateFileUpload { // TODO add dedicated permission? 
- return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return errors.Wrap(err, "ocfs: error reading permissions") - } - - _, err = os.Stat(ip) - if err != nil { - if os.IsNotExist(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return errors.Wrap(err, "ocfs: error stating "+ip) - } - - errs := []error{} - for _, k := range keys { - switch k { - case "http://owncloud.org/ns/favorite": - if u, ok := ctxpkg.ContextGetUser(ctx); ok { - // the favorite flag is specific to the user, so we need to incorporate the userid - if uid := u.GetId(); uid != nil { - fa := fmt.Sprintf("%s%s@%s", favPrefix, uid.GetOpaqueId(), uid.GetIdp()) - if err := xattr.Remove(ip, fa); err != nil { - log.Error().Err(err). - Str("ipath", ip). - Interface("user", u). - Str("key", fa). - Msg("could not unset favorite flag") - errs = append(errs, errors.Wrap(err, "could not unset favorite flag")) - } - } else { - log.Error(). - Str("ipath", ip). - Interface("user", u). - Msg("user has no id") - errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "user has no id")) - } - } else { - log.Error(). - Str("ipath", ip). - Interface("user", u). - Msg("error getting user from ctx") - errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx")) - } - default: - if err = xattr.Remove(ip, mdPrefix+k); err != nil { - // a non-existing attribute will return an error, which we can ignore - // (using string compare because the error type is syscall.Errno and not wrapped/recognizable) - if e, ok := err.(*xattr.Error); !ok || !(e.Err.Error() == "no data available" || - // darwin - e.Err.Error() == "attribute not found") { - log.Error().Err(err). - Str("ipath", ip). - Str("key", k). - Msg("could not unset metadata") - errs = append(errs, errors.Wrap(err, "could not unset metadata")) - } - } - } - } - - switch len(errs) { - case 0: - return fs.propagate(ctx, ip) - case 1: - return errs[0] - default: - // TODO how to return multiple errors? - return errors.New("multiple errors occurred, see log for details") - } -} - -// GetLock returns an existing lock on the given reference -func (fs *ocfs) GetLock(ctx context.Context, ref *provider.Reference) (*provider.Lock, error) { - return nil, errtypes.NotSupported("unimplemented") -} - -// SetLock puts a lock on the given reference -func (fs *ocfs) SetLock(ctx context.Context, ref *provider.Reference, lock *provider.Lock) error { - return errtypes.NotSupported("unimplemented") -} - -// RefreshLock refreshes an existing lock on the given reference -func (fs *ocfs) RefreshLock(ctx context.Context, ref *provider.Reference, lock *provider.Lock) error { - return errtypes.NotSupported("unimplemented") -} - -// Unlock removes an existing lock from the given reference -func (fs *ocfs) Unlock(ctx context.Context, ref *provider.Reference) error { - return errtypes.NotSupported("unimplemented") -} - -// Delete is actually only a move to trash -// -// This is a first optimistic approach. -// When a file has versions and we want to delete the file it could happen that -// the service crashes before all moves are finished. -// That would result in invalid state like the main files was moved but the -// versions were not. -// We will live with that compromise since this storage driver will be -// deprecated soon. 
-func (fs *ocfs) Delete(ctx context.Context, ref *provider.Reference) (err error) { - var ip string - if ip, err = fs.resolve(ctx, ref); err != nil { - return errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.Delete { - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return errors.Wrap(err, "ocfs: error reading permissions") - } - - _, err = os.Stat(ip) - if err != nil { - if os.IsNotExist(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return errors.Wrap(err, "ocfs: error stating "+ip) - } - - rp, err := fs.getRecyclePath(ctx) - if err != nil { - return errors.Wrap(err, "ocfs: error resolving recycle path") - } - - if err := os.MkdirAll(rp, 0700); err != nil { - return errors.Wrap(err, "ocfs: error creating trashbin dir "+rp) - } - - // ip is the path on disk ... we need only the path relative to root - origin := filepath.Dir(fs.toStoragePath(ctx, ip)) - - err = fs.trash(ctx, ip, rp, origin) - if err != nil { - return errors.Wrapf(err, "ocfs: error deleting file %s", ip) - } - err = fs.trashVersions(ctx, ip, origin) - if err != nil { - return errors.Wrapf(err, "ocfs: error deleting versions of file %s", ip) - } - return nil -} - -func (fs *ocfs) trash(ctx context.Context, ip string, rp string, origin string) error { - // set origin location in metadata - if err := xattr.Set(ip, trashOriginPrefix, []byte(origin)); err != nil { - return err - } - - // move to trash location - dtime := time.Now().Unix() - tgt := filepath.Join(rp, fmt.Sprintf("%s.d%d", filepath.Base(ip), dtime)) - - // The condition reads: "if the file exists" - // I know this check is hard to read because of the double negation - // but this way we avoid to duplicate the code following the if block. - // If two deletes happen fast consecutively they will have the same `dtime`, - // therefore we have to increase the 'dtime' to avoid collisions. - if _, err := os.Stat(tgt); !errors.Is(err, os.ErrNotExist) { - // timestamp collision, try again with higher value: - dtime++ - tgt = filepath.Join(rp, fmt.Sprintf("%s.d%d", filepath.Base(ip), dtime)) - } - if err := os.Rename(ip, tgt); err != nil { - return errors.Wrap(err, "ocfs: could not move item to trash") - } - - return fs.propagate(ctx, filepath.Dir(ip)) -} - -func (fs *ocfs) trashVersions(ctx context.Context, ip string, origin string) error { - vp := fs.getVersionsPath(ctx, ip) - vrp, err := fs.getVersionRecyclePath(ctx) - if err != nil { - return errors.Wrap(err, "error resolving versions recycle path") - } - - if err := os.MkdirAll(vrp, 0700); err != nil { - return errors.Wrap(err, "ocfs: error creating trashbin dir "+vrp) - } - - // Ignore error since the only possible error is malformed pattern. - versions, _ := filepath.Glob(vp + ".v*") - for _, v := range versions { - err := fs.trash(ctx, v, vrp, origin) - if err != nil { - return errors.Wrap(err, "ocfs: error deleting file "+v) - } - } - return nil -} - -func (fs *ocfs) Move(ctx context.Context, oldRef, newRef *provider.Reference) (err error) { - var oldIP string - if oldIP, err = fs.resolve(ctx, oldRef); err != nil { - return errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, oldIP); err == nil { - if !perm.Move { // TODO add dedicated permission? 
- return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(oldIP))) - } - return errors.Wrap(err, "ocfs: error reading permissions") - } - - var newIP string - if newIP, err = fs.resolve(ctx, newRef); err != nil { - return errors.Wrap(err, "ocfs: error resolving reference") - } - - // TODO check target permissions ... if it exists - - if err = os.Rename(oldIP, newIP); err != nil { - return errors.Wrap(err, "ocfs: error moving "+oldIP+" to "+newIP) - } - - log := appctx.GetLogger(ctx) - conn := fs.pool.Get() - defer conn.Close() - // Ideally if we encounter an error here we should rollback the Move/Rename. - // But since the owncloud storage driver is not being actively used by anyone other - // than the acceptance tests we should be fine by ignoring the errors. - _ = filepath.Walk(newIP, func(path string, info os.FileInfo, err error) error { - if err != nil { - // TODO(c0rby): rollback the move in case of an error - log.Error().Str("path", path).Err(err).Msg("error caching id") - return nil - } - id := readOrCreateID(context.Background(), path, nil) - _, err = conn.Do("SET", id, path) - if err != nil { - // TODO(c0rby): rollback the move in case of an error - log.Error().Str("path", path).Err(err).Msg("error caching id") - } - return nil - }) - if err := fs.propagate(ctx, newIP); err != nil { - return err - } - if err := fs.propagate(ctx, filepath.Dir(oldIP)); err != nil { - return err - } - return nil -} - -func (fs *ocfs) GetMD(ctx context.Context, ref *provider.Reference, mdKeys []string) (*provider.ResourceInfo, error) { - ip, err := fs.resolve(ctx, ref) - if err != nil { - // TODO return correct errtype - if _, ok := err.(errtypes.IsNotFound); ok { - return nil, err - } - return nil, errors.Wrap(err, "ocfs: error resolving reference") - } - p := fs.toStoragePath(ctx, ip) - - if fs.c.EnableHome { - if fs.isShareFolderChild(p) { - return fs.getMDShareFolder(ctx, p, mdKeys) - } - } - - // If GetMD is called for a path shared with the user then the path is - // already wrapped. 
(fs.resolve wraps the path) - if strings.HasPrefix(p, fs.c.DataDirectory) { - ip = p - } - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.Stat { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "ocfs: error reading permissions") - } - - md, err := os.Stat(ip) - if err != nil { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return nil, errors.Wrap(err, "ocfs: error stating "+ip) - } - c := fs.pool.Get() - defer c.Close() - m := fs.convertToResourceInfo(ctx, md, ip, fs.toStoragePath(ctx, ip), c, mdKeys) - - return m, nil -} - -func (fs *ocfs) getMDShareFolder(ctx context.Context, sp string, mdKeys []string) (*provider.ResourceInfo, error) { - ip := fs.toInternalShadowPath(ctx, sp) - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.Stat { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "ocfs: error reading permissions") - } - - md, err := os.Stat(ip) - if err != nil { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(fs.toStorageShadowPath(ctx, ip)) - } - return nil, errors.Wrapf(err, "ocfs: error stating %s", ip) - } - c := fs.pool.Get() - defer c.Close() - m := fs.convertToResourceInfo(ctx, md, ip, fs.toStorageShadowPath(ctx, ip), c, mdKeys) - if !fs.isShareFolderRoot(sp) { - m.Type = provider.ResourceType_RESOURCE_TYPE_REFERENCE - ref, err := xattr.Get(ip, mdPrefix+"target") - if err != nil { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStorageShadowPath(ctx, ip)) - } - return nil, err - } - m.Target = string(ref) - } - - return m, nil -} - -func (fs *ocfs) ListFolder(ctx context.Context, ref *provider.Reference, mdKeys []string) ([]*provider.ResourceInfo, error) { - log := appctx.GetLogger(ctx) - - ip, err := fs.resolve(ctx, ref) - if err != nil { - return nil, errors.Wrap(err, "ocfs: error resolving reference") - } - sp := fs.toStoragePath(ctx, ip) - - if fs.c.EnableHome { - log.Debug().Msg("home enabled") - if strings.HasPrefix(sp, "/") { - // permissions checked in listWithHome - return fs.listWithHome(ctx, "/", sp, mdKeys) - } - } - - log.Debug().Msg("list with nominal home") - // permissions checked in listWithNominalHome - return fs.listWithNominalHome(ctx, sp, mdKeys) -} - -func (fs *ocfs) listWithNominalHome(ctx context.Context, ip string, mdKeys []string) ([]*provider.ResourceInfo, error) { - - // If a user wants to list a folder shared with him the path will already - // be wrapped with the files directory path of the share owner. - // In that case we don't want to wrap the path again. 
- if !strings.HasPrefix(ip, fs.c.DataDirectory) { - ip = fs.toInternalPath(ctx, ip) - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListContainer { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "ocfs: error reading permissions") - } - - mds, err := ioutil.ReadDir(ip) - if err != nil { - return nil, errors.Wrapf(err, "ocfs: error listing %s", ip) - } - c := fs.pool.Get() - defer c.Close() - finfos := []*provider.ResourceInfo{} - for _, md := range mds { - cp := filepath.Join(ip, md.Name()) - m := fs.convertToResourceInfo(ctx, md, cp, fs.toStoragePath(ctx, cp), c, mdKeys) - finfos = append(finfos, m) - } - return finfos, nil -} - -func (fs *ocfs) listWithHome(ctx context.Context, home, p string, mdKeys []string) ([]*provider.ResourceInfo, error) { - log := appctx.GetLogger(ctx) - if p == home { - log.Debug().Msg("listing home") - return fs.listHome(ctx, home, mdKeys) - } - - if fs.isShareFolderRoot(p) { - log.Debug().Msg("listing share folder root") - return fs.listShareFolderRoot(ctx, p, mdKeys) - } - - if fs.isShareFolderChild(p) { - return nil, errtypes.PermissionDenied("ocfs: error listing folders inside the shared folder, only file references are stored inside") - } - - log.Debug().Msg("listing nominal home") - return fs.listWithNominalHome(ctx, p, mdKeys) -} - -func (fs *ocfs) listHome(ctx context.Context, home string, mdKeys []string) ([]*provider.ResourceInfo, error) { - // list files - ip := fs.toInternalPath(ctx, home) - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListContainer { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "ocfs: error reading permissions") - } - - mds, err := ioutil.ReadDir(ip) - if err != nil { - return nil, errors.Wrap(err, "ocfs: error listing files") - } - - c := fs.pool.Get() - defer c.Close() - - finfos := []*provider.ResourceInfo{} - for _, md := range mds { - cp := filepath.Join(ip, md.Name()) - m := fs.convertToResourceInfo(ctx, md, cp, fs.toStoragePath(ctx, cp), c, mdKeys) - finfos = append(finfos, m) - } - - // list shadow_files - ip = fs.toInternalShadowPath(ctx, home) - mds, err = ioutil.ReadDir(ip) - if err != nil { - return nil, errors.Wrap(err, "ocfs: error listing shadow_files") - } - for _, md := range mds { - cp := filepath.Join(ip, md.Name()) - m := fs.convertToResourceInfo(ctx, md, cp, fs.toStorageShadowPath(ctx, cp), c, mdKeys) - finfos = append(finfos, m) - } - return finfos, nil -} - -func (fs *ocfs) listShareFolderRoot(ctx context.Context, sp string, mdKeys []string) ([]*provider.ResourceInfo, error) { - ip := fs.toInternalShadowPath(ctx, sp) - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListContainer { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "ocfs: error reading permissions") - } - - mds, err := ioutil.ReadDir(ip) - if err != nil { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "ocfs: error listing shadow_files") - } - - c := fs.pool.Get() - defer c.Close() - - finfos := 
[]*provider.ResourceInfo{} - for _, md := range mds { - cp := filepath.Join(ip, md.Name()) - m := fs.convertToResourceInfo(ctx, md, cp, fs.toStorageShadowPath(ctx, cp), c, mdKeys) - m.Type = provider.ResourceType_RESOURCE_TYPE_REFERENCE - ref, err := xattr.Get(cp, mdPrefix+"target") - if err != nil { - return nil, err - } - m.Target = string(ref) - finfos = append(finfos, m) - } - - return finfos, nil -} - -func (fs *ocfs) archiveRevision(ctx context.Context, vbp string, ip string) error { - // move existing file to versions dir - vp := fmt.Sprintf("%s.v%d", vbp, time.Now().Unix()) - if err := os.MkdirAll(filepath.Dir(vp), 0700); err != nil { - return errors.Wrap(err, "ocfs: error creating versions dir "+vp) - } - - // TODO(jfd): make sure rename is atomic, missing fsync ... - if err := os.Rename(ip, vp); err != nil { - return errors.Wrap(err, "ocfs: error renaming from "+ip+" to "+vp) - } - - return nil -} - -func (fs *ocfs) copyMD(s string, t string) (err error) { - var attrs []string - if attrs, err = xattr.List(s); err != nil { - return err - } - for i := range attrs { - if strings.HasPrefix(attrs[i], ocPrefix) { - var d []byte - if d, err = xattr.Get(s, attrs[i]); err != nil { - return err - } - if err = xattr.Set(t, attrs[i], d); err != nil { - return err - } - } - } - return nil -} - -func (fs *ocfs) Download(ctx context.Context, ref *provider.Reference) (io.ReadCloser, error) { - ip, err := fs.resolve(ctx, ref) - if err != nil { - return nil, errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.InitiateFileDownload { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "ocfs: error reading permissions") - } - - r, err := os.Open(ip) - if err != nil { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, ip)) - } - return nil, errors.Wrap(err, "ocfs: error reading "+ip) - } - return r, nil -} - -func (fs *ocfs) ListRevisions(ctx context.Context, ref *provider.Reference) ([]*provider.FileVersion, error) { - ip, err := fs.resolve(ctx, ref) - if err != nil { - return nil, errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListFileVersions { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "ocfs: error reading permissions") - } - - vp := fs.getVersionsPath(ctx, ip) - - bn := filepath.Base(ip) - - revisions := []*provider.FileVersion{} - mds, err := ioutil.ReadDir(filepath.Dir(vp)) - if err != nil { - return nil, errors.Wrap(err, "ocfs: error reading"+filepath.Dir(vp)) - } - for i := range mds { - rev := fs.filterAsRevision(ctx, bn, mds[i]) - if rev != nil { - revisions = append(revisions, rev) - } - - } - return revisions, nil -} - -func (fs *ocfs) filterAsRevision(ctx context.Context, bn string, md os.FileInfo) *provider.FileVersion { - if strings.HasPrefix(md.Name(), bn) { - // versions have filename.ext.v12345678 - version := md.Name()[len(bn)+2:] // truncate ".v" to get version mtime - mtime, err := strconv.Atoi(version) - if err != nil { - log := appctx.GetLogger(ctx) - log.Error().Err(err).Str("path", md.Name()).Msg("invalid version mtime") - return nil - } - // TODO(jfd) trashed 
versions are in the files_trashbin/versions folder ... not relevant here - return &provider.FileVersion{ - Key: version, - Size: uint64(md.Size()), - Mtime: uint64(mtime), - Etag: calcEtag(ctx, md), - } - } - return nil -} - -func (fs *ocfs) DownloadRevision(ctx context.Context, ref *provider.Reference, revisionKey string) (io.ReadCloser, error) { - return nil, errtypes.NotSupported("download revision") -} - -func (fs *ocfs) RestoreRevision(ctx context.Context, ref *provider.Reference, revisionKey string) error { - ip, err := fs.resolve(ctx, ref) - if err != nil { - return errors.Wrap(err, "ocfs: error resolving reference") - } - - // check permissions - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.RestoreFileVersion { - return errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return errors.Wrap(err, "ocfs: error reading permissions") - } - - vp := fs.getVersionsPath(ctx, ip) - rp := vp + ".v" + revisionKey - - // check revision exists - rs, err := os.Stat(rp) - if err != nil { - return err - } - - if !rs.Mode().IsRegular() { - return fmt.Errorf("%s is not a regular file", rp) - } - - source, err := os.Open(rp) - if err != nil { - return err - } - defer source.Close() - - // destination should be available, otherwise we could not have navigated to its revisions - if err := fs.archiveRevision(ctx, fs.getVersionsPath(ctx, ip), ip); err != nil { - return err - } - - destination, err := os.Create(ip) - if err != nil { - // TODO(jfd) bring back revision in case sth goes wrong? - return err - } - defer destination.Close() - - _, err = io.Copy(destination, source) - - if err != nil { - return err - } - - // TODO(jfd) bring back revision in case sth goes wrong? - return fs.propagate(ctx, ip) -} - -func (fs *ocfs) PurgeRecycleItem(ctx context.Context, ref *provider.Reference, key, relativePath string) error { - rp, err := fs.getRecyclePath(ctx) - if err != nil { - return errors.Wrap(err, "ocfs: error resolving recycle path") - } - ip := filepath.Join(rp, filepath.Clean(key)) - // TODO check permission? - - // check permissions - /* are they stored in the trash? - if perm, err := fs.readPermissions(ctx, ip); err == nil { - if !perm.ListContainer { - return nil, errtypes.PermissionDenied("") - } - } else { - if isNotFound(err) { - return nil, errtypes.NotFound(fs.unwrap(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "ocfs: error reading permissions") - } - */ - - err = os.Remove(ip) - if err != nil { - return errors.Wrap(err, "ocfs: error deleting recycle item") - } - err = os.RemoveAll(filepath.Join(filepath.Dir(rp), "versions", filepath.Clean(key))) - if err != nil { - return errors.Wrap(err, "ocfs: error deleting recycle item versions") - } - // TODO delete keyfiles, keys, share-keys - return nil -} - -func (fs *ocfs) EmptyRecycle(ctx context.Context, ref *provider.Reference) error { - // TODO check permission? on what? user must be the owner - rp, err := fs.getRecyclePath(ctx) - if err != nil { - return errors.Wrap(err, "ocfs: error resolving recycle path") - } - err = os.RemoveAll(rp) - if err != nil { - return errors.Wrap(err, "ocfs: error deleting recycle files") - } - err = os.RemoveAll(filepath.Join(filepath.Dir(rp), "versions")) - if err != nil { - return errors.Wrap(err, "ocfs: error deleting recycle files versions") - } - // TODO delete keyfiles, keys, share-keys ... or just everything? 
- return nil -} - -func (fs *ocfs) convertToRecycleItem(ctx context.Context, rp string, md os.FileInfo) *provider.RecycleItem { - // trashbin items have filename.ext.d12345678 - suffix := filepath.Ext(md.Name()) - if len(suffix) == 0 || !strings.HasPrefix(suffix, ".d") { - log := appctx.GetLogger(ctx) - log.Error().Str("path", md.Name()).Msg("invalid trash item suffix") - return nil - } - trashtime := suffix[2:] // truncate "d" to get trashbin time - ttime, err := strconv.Atoi(trashtime) - if err != nil { - log := appctx.GetLogger(ctx) - log.Error().Err(err).Str("path", md.Name()).Msg("invalid trash time") - return nil - } - var v []byte - if v, err = xattr.Get(filepath.Join(rp, md.Name()), trashOriginPrefix); err != nil { - log := appctx.GetLogger(ctx) - log.Error().Err(err).Str("path", md.Name()).Msg("could not read origin") - return nil - } - // ownCloud 10 stores the parent dir of the deleted item as the location in the oc_files_trashbin table - // we use extended attributes for original location, but also only the parent location, which is why - // we need to join and trim the path when listing it - originalPath := filepath.Join(string(v), strings.TrimSuffix(filepath.Base(md.Name()), suffix)) - - return &provider.RecycleItem{ - Type: getResourceType(md.IsDir()), - Key: md.Name(), - // TODO do we need to prefix the path? it should be relative to this storage root, right? - Ref: &provider.Reference{ - Path: originalPath, - }, - Size: uint64(md.Size()), - DeletionTime: &types.Timestamp{ - Seconds: uint64(ttime), - // no nanos available - }, - } -} - -func (fs *ocfs) ListRecycle(ctx context.Context, ref *provider.Reference, key, relativePath string) ([]*provider.RecycleItem, error) { - // TODO check permission? on what? user must be the owner? - rp, err := fs.getRecyclePath(ctx) - if err != nil { - return nil, errors.Wrap(err, "ocfs: error resolving recycle path") - } - - // list files folder - mds, err := ioutil.ReadDir(filepath.Join(rp, key)) - if err != nil { - log := appctx.GetLogger(ctx) - log.Debug().Err(err).Str("path", rp).Msg("trash not readable") - // TODO jfd only ignore not found errors - return []*provider.RecycleItem{}, nil - } - // TODO (jfd) limit and offset - items := []*provider.RecycleItem{} - for i := range mds { - ri := fs.convertToRecycleItem(ctx, rp, mds[i]) - if ri != nil { - items = append(items, ri) - } - - } - return items, nil -} - -func (fs *ocfs) RestoreRecycleItem(ctx context.Context, ref *provider.Reference, key, relativePath string, restoreRef *provider.Reference) error { - // TODO check permission? on what? user must be the owner? 
- log := appctx.GetLogger(ctx) - rp, err := fs.getRecyclePath(ctx) - if err != nil { - return errors.Wrap(err, "ocfs: error resolving recycle path") - } - src := filepath.Join(rp, filepath.Clean(key)) - - suffix := filepath.Ext(src) - if len(suffix) == 0 || !strings.HasPrefix(suffix, ".d") { - log.Error().Str("key", key).Str("path", src).Msg("invalid trash item suffix") - return nil - } - - if restoreRef == nil { - restoreRef = &provider.Reference{} - } - if restoreRef.Path == "" { - v, err := xattr.Get(src, trashOriginPrefix) - if err != nil { - log.Error().Err(err).Str("key", key).Str("path", src).Msg("could not read origin") - } - restoreRef.Path = filepath.Join("/", filepath.Clean(string(v)), strings.TrimSuffix(filepath.Base(src), suffix)) - } - tgt := fs.toInternalPath(ctx, restoreRef.Path) - // move back to original location - if err := os.Rename(src, tgt); err != nil { - log.Error().Err(err).Str("key", key).Str("restorePath", restoreRef.Path).Str("src", src).Str("tgt", tgt).Msg("could not restore item") - return errors.Wrap(err, "ocfs: could not restore item") - } - // unset trash origin location in metadata - if err := xattr.Remove(tgt, trashOriginPrefix); err != nil { - // just a warning, will be overwritten next time it is deleted - log.Warn().Err(err).Str("key", key).Str("tgt", tgt).Msg("could not unset origin") - } - // TODO(jfd) restore versions - - return fs.propagate(ctx, tgt) -} - -func (fs *ocfs) ListStorageSpaces(ctx context.Context, filter []*provider.ListStorageSpacesRequest_Filter) ([]*provider.StorageSpace, error) { - - var ( - spaceType = spaceTypeAny - // spaceID = spaceIDAny - // nodeID = spaceIDAny - err error - ) - - for i := range filter { - switch filter[i].Type { - case provider.ListStorageSpacesRequest_Filter_TYPE_SPACE_TYPE: - spaceType = filter[i].GetSpaceType() - case provider.ListStorageSpacesRequest_Filter_TYPE_ID: - // spaceID, nodeID = utils.SplitStorageSpaceID(filter[i].GetId().OpaqueId) - } - } - - spaces := []*provider.StorageSpace{} - if spaceType != spaceTypeAny && spaceType != "personal" { - // owncloud only has personal spaces - // TODO implement external spaces? - return spaces, nil - } - - // all folders with a files folder could be a personal space - matches, err := filepath.Glob(filepath.Join(fs.c.DataDirectory, "*", "files")) - if err != nil { - return nil, err - } - - for i := range matches { - - id := readOrCreateID(context.Background(), matches[i], nil) - space := &provider.StorageSpace{ - Id: &provider.StorageSpaceId{OpaqueId: id}, - // Owner: , // TODO from path layout? - // Root: , //? 
- } - spaces = append(spaces, space) - } - - // FIXME: The linter doesn't like empty branches - // if len(matches) == 0 && nodeID != spaceID { - // TODO lookup by id - // } - - return spaces, nil -} - -// UpdateStorageSpace updates a storage space -func (fs *ocfs) UpdateStorageSpace(ctx context.Context, req *provider.UpdateStorageSpaceRequest) (*provider.UpdateStorageSpaceResponse, error) { - return nil, errtypes.NotSupported("update storage space") -} - -// DeleteStorageSpace deletes a storage space -func (fs *ocfs) DeleteStorageSpace(ctx context.Context, req *provider.DeleteStorageSpaceRequest) error { - return errtypes.NotSupported("delete storage space") -} - -func (fs *ocfs) propagate(ctx context.Context, leafPath string) error { - var root string - if fs.c.EnableHome { - root = fs.toInternalPath(ctx, "/") - } else { - owner := fs.getOwner(leafPath) - root = fs.toInternalPath(ctx, owner) - } - if !strings.HasPrefix(leafPath, root) { - err := errors.New("internal path outside root") - appctx.GetLogger(ctx).Error(). - Err(err). - Str("leafPath", leafPath). - Str("root", root). - Msg("could not propagate change") - return err - } - - fi, err := os.Stat(leafPath) - if err != nil { - appctx.GetLogger(ctx).Error(). - Err(err). - Str("leafPath", leafPath). - Str("root", root). - Msg("could not propagate change") - return err - } - - parts := strings.Split(strings.TrimPrefix(leafPath, root), "/") - // root never ends in / so the split returns an empty first element, which we can skip - // we do not need to chmod the last element because it is the leaf path (< and not <= comparison) - for i := 1; i < len(parts); i++ { - appctx.GetLogger(ctx).Debug(). - Str("leafPath", leafPath). - Str("root", root). - Int("i", i). - Interface("parts", parts). - Msg("propagating change") - if err := os.Chtimes(filepath.Join(root), fi.ModTime(), fi.ModTime()); err != nil { - appctx.GetLogger(ctx).Error(). - Err(err). - Str("leafPath", leafPath). - Str("root", root). - Msg("could not propagate change") - return err - } - root = filepath.Join(root, parts[i]) - } - return nil -} - -func readChecksumIntoResourceChecksum(ctx context.Context, nodePath, algo string, ri *provider.ResourceInfo) { - v, err := xattr.Get(nodePath, checksumPrefix+algo) - log := appctx.GetLogger(ctx). - Debug(). - Err(err). - Str("nodepath", nodePath). - Str("algorithm", algo) - switch { - case err == nil: - ri.Checksum = &provider.ResourceChecksum{ - Type: storageprovider.PKG2GRPCXS(algo), - Sum: hex.EncodeToString(v), - } - case isNoData(err): - log.Msg("checksum not set") - case isNotFound(err): - log.Msg("file not found") - default: - log.Msg("could not read checksum") - } -} - -func readChecksumIntoOpaque(ctx context.Context, nodePath, algo string, ri *provider.ResourceInfo) { - v, err := xattr.Get(nodePath, checksumPrefix+algo) - log := appctx.GetLogger(ctx). - Debug(). - Err(err). - Str("nodepath", nodePath). - Str("algorithm", algo) - switch { - case err == nil: - if ri.Opaque == nil { - ri.Opaque = &types.Opaque{ - Map: map[string]*types.OpaqueEntry{}, - } - } - ri.Opaque.Map[algo] = &types.OpaqueEntry{ - Decoder: "plain", - Value: []byte(hex.EncodeToString(v)), - } - case isNoData(err): - log.Msg("checksum not set") - case isNotFound(err): - log.Msg("file not found") - default: - log.Msg("could not read checksum") - } -} - -// TODO propagate etag and mtime or append event to history? propagate on disk ... -// - but propagation is a separate task. only if upload was successful ... 
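
The driver deleted above tracked file ids with a combination of extended attributes and a Redis cache: every node carried a uuid in an attribute under the driver's metadata namespace, and Redis mapped that uuid back to the current filesystem path so that id-based lookups survived renames (this is what the filepath.Walk in Move refreshes). A minimal sketch of that pattern, assuming the "user.oc.id" attribute key and with helper names chosen for illustration rather than taken verbatim from the removed code:

package idcache

import (
	"os"
	"path/filepath"

	"github.com/gomodule/redigo/redis"
	"github.com/google/uuid"
	"github.com/pkg/xattr"
)

// idAttr is the extended attribute that holds a node's uuid
// (key assumed here; the removed driver kept its metadata in a user-namespace attribute).
const idAttr = "user.oc.id"

// ensureID returns the uuid stored on path, creating and persisting one if it is missing.
func ensureID(path string) (string, error) {
	if v, err := xattr.Get(path, idAttr); err == nil {
		return string(v), nil
	}
	id := uuid.New().String()
	if err := xattr.Set(path, idAttr, []byte(id)); err != nil {
		return "", err
	}
	return id, nil
}

// cacheTree refreshes the id -> path mapping for every node below root,
// e.g. after a rename has moved a whole subtree to a new location.
func cacheTree(conn redis.Conn, root string) error {
	return filepath.Walk(root, func(path string, info os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		id, err := ensureID(path)
		if err != nil {
			return err
		}
		_, err = conn.Do("SET", id, path)
		return err
	})
}

As the comments in the Move implementation above note, cache-update errors were only logged, never rolled back, so a failed SET could leave the Redis mapping pointing at the old path.
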
diff --git a/pkg/storage/fs/owncloud/owncloud_unix.go b/pkg/storage/fs/owncloud/owncloud_unix.go deleted file mode 100755 index 61cc65433d..0000000000 --- a/pkg/storage/fs/owncloud/owncloud_unix.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2018-2021 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -//go:build !windows -// +build !windows - -package owncloud - -import ( - "context" - "crypto/md5" - "encoding/binary" - "fmt" - "os" - "strings" - "syscall" - - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" -) - -// TODO(jfd) get rid of the differences between unix and windows. the inode and dev should never be used for the etag because it interferes with backups - -// calcEtag will create an etag based on the md5 of -// - mtime, -// - inode (if available), -// - device (if available) and -// - size. -// errors are logged, but an etag will still be returned -func calcEtag(ctx context.Context, fi os.FileInfo) string { - log := appctx.GetLogger(ctx) - h := md5.New() - err := binary.Write(h, binary.BigEndian, fi.ModTime().UnixNano()) - if err != nil { - log.Error().Err(err).Msg("error writing mtime") - } - stat, ok := fi.Sys().(*syscall.Stat_t) - if ok { - // take device and inode into account - err = binary.Write(h, binary.BigEndian, stat.Ino) - if err != nil { - log.Error().Err(err).Msg("error writing inode") - } - err = binary.Write(h, binary.BigEndian, stat.Dev) - if err != nil { - log.Error().Err(err).Msg("error writing device") - } - } - err = binary.Write(h, binary.BigEndian, fi.Size()) - if err != nil { - log.Error().Err(err).Msg("error writing size") - } - etag := fmt.Sprintf(`"%x"`, h.Sum(nil)) - return fmt.Sprintf("\"%s\"", strings.Trim(etag, "\"")) -} - -func (fs *ocfs) GetQuota(ctx context.Context, ref *provider.Reference) (uint64, uint64, error) { - // TODO quota of which storage space? - // we could use the logged in user, but when a user has access to multiple storages this falls short - // for now return quota of root - stat := syscall.Statfs_t{} - err := syscall.Statfs(fs.toInternalPath(ctx, "/"), &stat) - if err != nil { - return 0, 0, err - } - total := stat.Blocks * uint64(stat.Bsize) // Total data blocks in filesystem - used := (stat.Blocks - stat.Bavail) * uint64(stat.Bsize) // Free blocks available to unprivileged user - return total, used, nil -} diff --git a/pkg/storage/fs/owncloud/owncloud_windows.go b/pkg/storage/fs/owncloud/owncloud_windows.go deleted file mode 100644 index 80e86e4cd4..0000000000 --- a/pkg/storage/fs/owncloud/owncloud_windows.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2018-2021 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. - -//go:build windows -// +build windows - -package owncloud - -import ( - "context" - "crypto/md5" - "encoding/binary" - "fmt" - "os" - "strings" - - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - "golang.org/x/sys/windows" -) - -// calcEtag will create an etag based on the md5 of -// - mtime, -// - inode (if available), -// - device (if available) and -// - size. -// errors are logged, but an etag will still be returned -func calcEtag(ctx context.Context, fi os.FileInfo) string { - log := appctx.GetLogger(ctx) - h := md5.New() - err := binary.Write(h, binary.BigEndian, fi.ModTime().UnixNano()) - if err != nil { - log.Error().Err(err).Msg("error writing mtime") - } - // device and inode have no meaning on windows - err = binary.Write(h, binary.BigEndian, fi.Size()) - if err != nil { - log.Error().Err(err).Msg("error writing size") - } - etag := fmt.Sprintf(`"%x"`, h.Sum(nil)) - return fmt.Sprintf("\"%s\"", strings.Trim(etag, "\"")) -} - -func (fs *ocfs) GetQuota(ctx context.Context, ref *provider.Reference) (uint64, uint64, error) { - // TODO quota of which storage space? - // we could use the logged in user, but when a user has access to multiple storages this falls short - // for now return quota of root - var free, total, avail uint64 - - pathPtr, err := windows.UTF16PtrFromString(fs.toInternalPath(ctx, "/")) - if err != nil { - return 0, 0, err - } - err = windows.GetDiskFreeSpaceEx(pathPtr, &avail, &total, &free) - if err != nil { - return 0, 0, err - } - - used := total - free - return total, used, nil -} diff --git a/pkg/storage/fs/owncloud/upload.go b/pkg/storage/fs/owncloud/upload.go deleted file mode 100644 index 69a48d25d8..0000000000 --- a/pkg/storage/fs/owncloud/upload.go +++ /dev/null @@ -1,556 +0,0 @@ -// Copyright 2018-2021 CERN -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// In applying this license, CERN does not waive the privileges and immunities -// granted to it by virtue of its status as an Intergovernmental Organization -// or submit itself to any jurisdiction. 
- -package owncloud - -import ( - "context" - "crypto/md5" - "crypto/sha1" - "encoding/hex" - "encoding/json" - "fmt" - "hash/adler32" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" - provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" - "github.com/cs3org/reva/pkg/appctx" - ctxpkg "github.com/cs3org/reva/pkg/ctx" - "github.com/cs3org/reva/pkg/errtypes" - "github.com/cs3org/reva/pkg/logger" - "github.com/cs3org/reva/pkg/storage/utils/chunking" - "github.com/cs3org/reva/pkg/storage/utils/templates" - "github.com/cs3org/reva/pkg/utils" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/pkg/xattr" - "github.com/rs/zerolog" - tusd "github.com/tus/tusd/pkg/handler" -) - -var defaultFilePerm = os.FileMode(0664) - -func (fs *ocfs) Upload(ctx context.Context, ref *provider.Reference, r io.ReadCloser) error { - upload, err := fs.GetUpload(ctx, ref.GetPath()) - if err != nil { - return errors.Wrap(err, "ocfs: error retrieving upload") - } - - uploadInfo := upload.(*fileUpload) - - p := uploadInfo.info.Storage["InternalDestination"] - ok, err := chunking.IsChunked(p) - if err != nil { - return errors.Wrap(err, "ocfs: error checking path") - } - if ok { - var assembledFile string - p, assembledFile, err = fs.chunkHandler.WriteChunk(p, r) - if err != nil { - return err - } - if p == "" { - if err = uploadInfo.Terminate(ctx); err != nil { - return errors.Wrap(err, "ocfs: error removing auxiliary files") - } - return errtypes.PartialContent(ref.String()) - } - uploadInfo.info.Storage["InternalDestination"] = p - fd, err := os.Open(assembledFile) - if err != nil { - return errors.Wrap(err, "ocfs: error opening assembled file") - } - defer fd.Close() - defer os.RemoveAll(assembledFile) - r = fd - } - - if _, err := uploadInfo.WriteChunk(ctx, 0, r); err != nil { - return errors.Wrap(err, "ocfs: error writing to binary file") - } - - return uploadInfo.FinishUpload(ctx) -} - -// InitiateUpload returns upload ids corresponding to different protocols it supports -// TODO read optional content for small files in this request -func (fs *ocfs) InitiateUpload(ctx context.Context, ref *provider.Reference, uploadLength int64, metadata map[string]string) (map[string]string, error) { - ip, err := fs.resolve(ctx, ref) - if err != nil { - return nil, errors.Wrap(err, "ocfs: error resolving reference") - } - - // permissions are checked in NewUpload below - - p := fs.toStoragePath(ctx, ip) - - info := tusd.FileInfo{ - MetaData: tusd.MetaData{ - "filename": filepath.Base(p), - "dir": filepath.Dir(p), - }, - Size: uploadLength, - } - - if metadata != nil { - if metadata["mtime"] != "" { - info.MetaData["mtime"] = metadata["mtime"] - } - if _, ok := metadata["sizedeferred"]; ok { - info.SizeIsDeferred = true - } - } - - upload, err := fs.NewUpload(ctx, info) - if err != nil { - return nil, err - } - - info, _ = upload.GetInfo(ctx) - - return map[string]string{ - "simple": info.ID, - "tus": info.ID, - }, nil -} - -// UseIn tells the tus upload middleware which extensions it supports. 
-func (fs *ocfs) UseIn(composer *tusd.StoreComposer) { - composer.UseCore(fs) - composer.UseTerminater(fs) - composer.UseConcater(fs) - composer.UseLengthDeferrer(fs) -} - -// To implement the core tus.io protocol as specified in https://tus.io/protocols/resumable-upload.html#core-protocol -// - the storage needs to implement NewUpload and GetUpload -// - the upload needs to implement the tusd.Upload interface: WriteChunk, GetInfo, GetReader and FinishUpload - -func (fs *ocfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tusd.Upload, err error) { - - log := appctx.GetLogger(ctx) - log.Debug().Interface("info", info).Msg("ocfs: NewUpload") - - if info.MetaData["filename"] == "" { - return nil, errors.New("ocfs: missing filename in metadata") - } - info.MetaData["filename"] = filepath.Clean(info.MetaData["filename"]) - - dir := info.MetaData["dir"] - if dir == "" { - return nil, errors.New("ocfs: missing dir in metadata") - } - info.MetaData["dir"] = filepath.Clean(info.MetaData["dir"]) - - ip := fs.toInternalPath(ctx, filepath.Join(info.MetaData["dir"], info.MetaData["filename"])) - - // check permissions - var perm *provider.ResourcePermissions - var perr error - // if destination exists - if _, err := os.Stat(ip); err == nil { - // check permissions of file to be overwritten - perm, perr = fs.readPermissions(ctx, ip) - } else { - // check permissions of parent folder - perm, perr = fs.readPermissions(ctx, filepath.Dir(ip)) - } - if perr == nil { - if !perm.InitiateFileUpload { - return nil, errtypes.PermissionDenied("") - } - } else { - if os.IsNotExist(err) { - return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) - } - return nil, errors.Wrap(err, "ocfs: error reading permissions") - } - - log.Debug().Interface("info", info).Msg("ocfs: resolved filename") - - info.ID = uuid.New().String() - - binPath, err := fs.getUploadPath(ctx, info.ID) - if err != nil { - return nil, errors.Wrap(err, "ocfs: error resolving upload path") - } - usr := ctxpkg.ContextMustGetUser(ctx) - info.Storage = map[string]string{ - "Type": "OwnCloudStore", - "BinPath": binPath, - "InternalDestination": ip, - - "Idp": usr.Id.Idp, - "UserId": usr.Id.OpaqueId, - "UserType": utils.UserTypeToString(usr.Id.Type), - "UserName": usr.Username, - - "LogLevel": log.GetLevel().String(), - } - // Create binary file in the upload folder with no content - log.Debug().Interface("info", info).Msg("ocfs: built storage info") - file, err := os.OpenFile(binPath, os.O_CREATE|os.O_WRONLY, defaultFilePerm) - if err != nil { - return nil, err - } - defer file.Close() - - u := &fileUpload{ - info: info, - binPath: binPath, - infoPath: filepath.Join(fs.c.UploadInfoDir, info.ID+".info"), - fs: fs, - ctx: ctx, - } - - // writeInfo creates the file by itself if necessary - err = u.writeInfo() - if err != nil { - return nil, err - } - - return u, nil -} - -func (fs *ocfs) getUploadPath(ctx context.Context, uploadID string) (string, error) { - u, ok := ctxpkg.ContextGetUser(ctx) - if !ok { - err := errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx") - return "", err - } - layout := templates.WithUser(u, fs.c.UserLayout) - return filepath.Join(fs.c.DataDirectory, layout, "uploads", uploadID), nil -} - -// GetUpload returns the Upload for the given upload id -func (fs *ocfs) GetUpload(ctx context.Context, id string) (tusd.Upload, error) { - infoPath := filepath.Join(fs.c.UploadInfoDir, filepath.Join("/", id+".info")) - - info := tusd.FileInfo{} - data, err := ioutil.ReadFile(infoPath) - 
if err != nil { - return nil, err - } - if err := json.Unmarshal(data, &info); err != nil { - return nil, err - } - - stat, err := os.Stat(info.Storage["BinPath"]) - if err != nil { - return nil, err - } - - info.Offset = stat.Size() - - u := &userpb.User{ - Id: &userpb.UserId{ - Idp: info.Storage["Idp"], - OpaqueId: info.Storage["UserId"], - Type: utils.UserTypeMap(info.Storage["UserType"]), - }, - Username: info.Storage["UserName"], - } - - ctx = ctxpkg.ContextSetUser(ctx, u) - // TODO configure the logger the same way ... store and add traceid in file info - - var opts []logger.Option - opts = append(opts, logger.WithLevel(info.Storage["LogLevel"])) - opts = append(opts, logger.WithWriter(os.Stderr, logger.ConsoleMode)) - l := logger.New(opts...) - - sub := l.With().Int("pid", os.Getpid()).Logger() - - ctx = appctx.WithLogger(ctx, &sub) - - return &fileUpload{ - info: info, - binPath: info.Storage["BinPath"], - infoPath: infoPath, - fs: fs, - ctx: ctx, - }, nil -} - -type fileUpload struct { - // info stores the current information about the upload - info tusd.FileInfo - // infoPath is the path to the .info file - infoPath string - // binPath is the path to the binary file (which has no extension) - binPath string - // only fs knows how to handle metadata and versions - fs *ocfs - // a context with a user - // TODO add logger as well? - ctx context.Context -} - -// GetInfo returns the FileInfo -func (upload *fileUpload) GetInfo(ctx context.Context) (tusd.FileInfo, error) { - return upload.info, nil -} - -// WriteChunk writes the stream from the reader to the given offset of the upload -func (upload *fileUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) { - file, err := os.OpenFile(upload.binPath, os.O_WRONLY|os.O_APPEND, defaultFilePerm) - if err != nil { - return 0, err - } - defer file.Close() - - n, err := io.Copy(file, src) - - // If the HTTP PATCH request gets interrupted in the middle (e.g. because - // the user wants to pause the upload), Go's net/http returns an io.ErrUnexpectedEOF. - // However, for OwnCloudStore it's not important whether the stream has ended - // on purpose or accidentally. - if err != nil { - if err != io.ErrUnexpectedEOF { - return n, err - } - } - - upload.info.Offset += n - err = upload.writeInfo() // TODO info is written here ... we need to truncate in DiscardChunk - - return n, err -} - -// GetReader returns an io.Reader for the upload -func (upload *fileUpload) GetReader(ctx context.Context) (io.Reader, error) { - return os.Open(upload.binPath) -} - -// writeInfo updates the entire information. Everything will be overwritten. 
-func (upload *fileUpload) writeInfo() error { - data, err := json.Marshal(upload.info) - if err != nil { - return err - } - return ioutil.WriteFile(upload.infoPath, data, defaultFilePerm) -} - -// FinishUpload finishes an upload and moves the file to the internal destination -func (upload *fileUpload) FinishUpload(ctx context.Context) error { - log := appctx.GetLogger(upload.ctx) - - sha1Sum := make([]byte, 0, 32) - md5Sum := make([]byte, 0, 32) - adler32Sum := make([]byte, 0, 32) - { - sha1h := sha1.New() - md5h := md5.New() - adler32h := adler32.New() - f, err := os.Open(upload.binPath) - if err != nil { - log.Err(err).Msg("Decomposedfs: could not open file for checksumming") - // we can continue if no oc checksum header is set - } - defer f.Close() - - r1 := io.TeeReader(f, sha1h) - r2 := io.TeeReader(r1, md5h) - - if _, err := io.Copy(adler32h, r2); err != nil { - log.Err(err).Msg("Decomposedfs: could not copy bytes for checksumming") - } - - sha1Sum = sha1h.Sum(sha1Sum) - md5Sum = md5h.Sum(md5Sum) - adler32Sum = adler32h.Sum(adler32Sum) - } - - if upload.info.MetaData["checksum"] != "" { - parts := strings.SplitN(upload.info.MetaData["checksum"], " ", 2) - if len(parts) != 2 { - return errtypes.BadRequest("invalid checksum format. must be '[algorithm] [checksum]'") - } - var err error - switch parts[0] { - case "sha1": - err = upload.checkHash(parts[1], sha1Sum) - case "md5": - err = upload.checkHash(parts[1], md5Sum) - case "adler32": - err = upload.checkHash(parts[1], adler32Sum) - default: - err = errtypes.BadRequest("unsupported checksum algorithm: " + parts[0]) - } - if err != nil { - return err - } - } - - ip := upload.info.Storage["InternalDestination"] - - // if destination exists - // TODO check etag with If-Match header - if _, err := os.Stat(ip); err == nil { - // copy attributes of existing file to tmp file - if err := upload.fs.copyMD(ip, upload.binPath); err != nil { - return errors.Wrap(err, "ocfs: error copying metadata from "+ip+" to "+upload.binPath) - } - // create revision - if err := upload.fs.archiveRevision(upload.ctx, upload.fs.getVersionsPath(upload.ctx, ip), ip); err != nil { - return err - } - } - - err := os.Rename(upload.binPath, ip) - if err != nil { - log.Err(err).Interface("info", upload.info). - Str("binPath", upload.binPath). - Str("ipath", ip). 
- Msg("ocfs: could not rename") - return err - } - - // only delete the upload if it was successfully written to the storage - if err := os.Remove(upload.infoPath); err != nil { - if !os.IsNotExist(err) { - log.Err(err).Interface("info", upload.info).Msg("ocfs: could not delete upload info") - return err - } - } - - if upload.info.MetaData["mtime"] != "" { - err := upload.fs.setMtime(ctx, ip, upload.info.MetaData["mtime"]) - if err != nil { - log.Err(err).Interface("info", upload.info).Msg("ocfs: could not set mtime metadata") - return err - } - } - - // now try write all checksums - tryWritingChecksum(log, ip, "sha1", sha1Sum) - tryWritingChecksum(log, ip, "md5", md5Sum) - tryWritingChecksum(log, ip, "adler32", adler32Sum) - - return upload.fs.propagate(upload.ctx, ip) -} - -// To implement the termination extension as specified in https://tus.io/protocols/resumable-upload.html#termination -// - the storage needs to implement AsTerminatableUpload -// - the upload needs to implement Terminate - -// AsTerminatableUpload returns a TerminatableUpload -func (fs *ocfs) AsTerminatableUpload(upload tusd.Upload) tusd.TerminatableUpload { - return upload.(*fileUpload) -} - -// Terminate terminates the upload -func (upload *fileUpload) Terminate(ctx context.Context) error { - if err := os.Remove(upload.infoPath); err != nil { - if !os.IsNotExist(err) { - return err - } - } - if err := os.Remove(upload.binPath); err != nil { - if !os.IsNotExist(err) { - return err - } - } - return nil -} - -// To implement the creation-defer-length extension as specified in https://tus.io/protocols/resumable-upload.html#creation -// - the storage needs to implement AsLengthDeclarableUpload -// - the upload needs to implement DeclareLength - -// AsLengthDeclarableUpload returns a LengthDeclarableUpload -func (fs *ocfs) AsLengthDeclarableUpload(upload tusd.Upload) tusd.LengthDeclarableUpload { - return upload.(*fileUpload) -} - -// DeclareLength updates the upload length information -func (upload *fileUpload) DeclareLength(ctx context.Context, length int64) error { - upload.info.Size = length - upload.info.SizeIsDeferred = false - return upload.writeInfo() -} - -// To implement the concatenation extension as specified in https://tus.io/protocols/resumable-upload.html#concatenation -// - the storage needs to implement AsConcatableUpload -// - the upload needs to implement ConcatUploads - -// AsConcatableUpload returns a ConcatableUpload -func (fs *ocfs) AsConcatableUpload(upload tusd.Upload) tusd.ConcatableUpload { - return upload.(*fileUpload) -} - -// ConcatUploads concatenates multiple uploads -func (upload *fileUpload) ConcatUploads(ctx context.Context, uploads []tusd.Upload) (err error) { - file, err := os.OpenFile(upload.binPath, os.O_WRONLY|os.O_APPEND, defaultFilePerm) - if err != nil { - return err - } - defer file.Close() - - for _, partialUpload := range uploads { - fileUpload := partialUpload.(*fileUpload) - - src, err := os.Open(fileUpload.binPath) - if err != nil { - return err - } - - if _, err := io.Copy(file, src); err != nil { - return err - } - } - - return -} - -func (upload *fileUpload) checkHash(expected string, h []byte) error { - if expected != hex.EncodeToString(h) { - upload.discardChunk() - return errtypes.ChecksumMismatch(fmt.Sprintf("invalid checksum: expected %s got %x", upload.info.MetaData["checksum"], h)) - } - return nil -} - -func (upload *fileUpload) discardChunk() { - if err := os.Remove(upload.binPath); err != nil { - if !os.IsNotExist(err) { - 
appctx.GetLogger(upload.ctx).Err(err).Interface("info", upload.info).Str("binPath", upload.binPath).Interface("info", upload.info).Msg("Decomposedfs: could not discard chunk") - return - } - } - if err := os.Remove(upload.infoPath); err != nil { - if !os.IsNotExist(err) { - appctx.GetLogger(upload.ctx).Err(err).Interface("info", upload.info).Str("infoPath", upload.infoPath).Interface("info", upload.info).Msg("Decomposedfs: could not discard chunk info") - return - } - } -} - -func tryWritingChecksum(log *zerolog.Logger, path, algo string, h []byte) { - if err := xattr.Set(path, checksumPrefix+algo, h); err != nil { - log.Err(err). - Str("csType", algo). - Bytes("hash", h). - Msg("ocfs: could not write checksum") - } -} From bb41da2493790f68637fffdb2432f7cfaa9b37cf Mon Sep 17 00:00:00 2001 From: Willy Kloucek Date: Tue, 1 Feb 2022 11:04:07 +0100 Subject: [PATCH 2/2] remove owncloud storage driver integration tests and demo config files --- examples/oc-phoenix/storage-home.toml | 44 ------------------- examples/oc-phoenix/storage-oc.toml | 34 -------------- .../fixtures/storageprovider-owncloud.toml | 12 ----- .../integration/grpc/storageprovider_test.go | 26 ++--------- 4 files changed, 3 insertions(+), 113 deletions(-) delete mode 100644 examples/oc-phoenix/storage-home.toml delete mode 100644 examples/oc-phoenix/storage-oc.toml delete mode 100644 tests/integration/grpc/fixtures/storageprovider-owncloud.toml diff --git a/examples/oc-phoenix/storage-home.toml b/examples/oc-phoenix/storage-home.toml deleted file mode 100644 index 3ed223f412..0000000000 --- a/examples/oc-phoenix/storage-home.toml +++ /dev/null @@ -1,44 +0,0 @@ -# This storage-home.toml config file will start a reva service that: -[shared] -jwt_secret = "Pive-Fumkiu4" -gatewaysvc = "localhost:19000" - -# - authenticates grpc storage provider requests using the internal jwt token -# - authenticates http upload and download requests requests using basic auth -# - serves the home storage provider on grpc port 12000 -# - serves http dataprovider for this storage on port 12001 -# - /data - dataprovider: file up and download -# -# The home storage will inject the username into the path and jail users into -# their home directory - -[grpc] -address = "0.0.0.0:12000" - -# This is a storage provider that grants direct access to the wrapped storage -# TODO same storage id as the /oc/ storage provider -# if we have an id, we can directly go to that storage, no need to wrap paths -# we have a locally running dataprovider -# this is where clients can find it -# the context path wrapper reads tho username from the context and prefixes the relative storage path with it -[grpc.services.storageprovider] -driver = "owncloud" -expose_data_server = true -data_server_url = "http://localhost:12001/data" -enable_home_creation = true - -[grpc.services.storageprovider.drivers.owncloud] -datadirectory = "/var/tmp/reva/data" -enable_home = true - - -[http] -address = "0.0.0.0:12001" - -[http.services.dataprovider] -driver = "owncloud" -temp_folder = "/var/tmp/reva/tmp" - -[http.services.dataprovider.drivers.owncloud] -datadirectory = "/var/tmp/reva/data" -enable_home = true diff --git a/examples/oc-phoenix/storage-oc.toml b/examples/oc-phoenix/storage-oc.toml deleted file mode 100644 index 5aac69f672..0000000000 --- a/examples/oc-phoenix/storage-oc.toml +++ /dev/null @@ -1,34 +0,0 @@ -# This storage.toml config file will start a reva service that: -[shared] -jwt_secret = "Pive-Fumkiu4" -gatewaysvc = "localhost:19000" - -# - authenticates grpc storage 
provider requests using the internal jwt token -# - authenticates http upload and download requests requests using basic auth -# - serves the storage provider on grpc port 11000 -# - serves http dataprovider for this storage on port 11001 -# - /data - dataprovider: file up and download -[grpc] -address = "0.0.0.0:11000" - -# This is a storage provider that grants direct access to the wrapped storage -# we have a locally running dataprovider -[grpc.services.storageprovider] -driver = "owncloud" -expose_data_server = true -data_server_url = "http://localhost:11001/data" - -[grpc.services.storageprovider.drivers.owncloud] -datadirectory = "/var/tmp/reva/data" -redis = "redis:6379" -userprovidersvc = "localhost:18000" - -[http] -address = "0.0.0.0:11001" - -[http.services.dataprovider] -driver = "owncloud" -temp_folder = "/var/tmp/reva/tmp" - -[http.services.dataprovider.drivers.owncloud] -datadirectory = "/var/tmp/reva/data" diff --git a/tests/integration/grpc/fixtures/storageprovider-owncloud.toml b/tests/integration/grpc/fixtures/storageprovider-owncloud.toml deleted file mode 100644 index 1cd808cea6..0000000000 --- a/tests/integration/grpc/fixtures/storageprovider-owncloud.toml +++ /dev/null @@ -1,12 +0,0 @@ -[grpc] -address = "{{grpc_address}}" - -[grpc.services.storageprovider] -driver = "owncloud" - -[grpc.services.storageprovider.drivers.owncloud] -enable_home = {{enable_home}} -datadirectory = "{{root}}/storage" -userprovidersvc = "{{users_address}}" -mount_id = "{{id}}" -redis = "{{redis_address}}" \ No newline at end of file diff --git a/tests/integration/grpc/storageprovider_test.go b/tests/integration/grpc/storageprovider_test.go index 36dac5e65c..2b005eaec5 100644 --- a/tests/integration/grpc/storageprovider_test.go +++ b/tests/integration/grpc/storageprovider_test.go @@ -20,7 +20,6 @@ package grpc_test import ( "context" - "os" "google.golang.org/grpc/metadata" @@ -32,7 +31,6 @@ import ( "github.com/cs3org/reva/pkg/rgrpc/todo/pool" "github.com/cs3org/reva/pkg/storage" "github.com/cs3org/reva/pkg/storage/fs/ocis" - "github.com/cs3org/reva/pkg/storage/fs/owncloud" jwt "github.com/cs3org/reva/pkg/token/manager/jwt" "github.com/cs3org/reva/tests/helpers" @@ -64,11 +62,6 @@ func createFS(provider string, revads map[string]*Revad) (storage.FS, error) { conf["root"] = revads["storage"].StorageRoot conf["enable_home"] = true f = ocis.New - case "owncloud": - conf["datadirectory"] = revads["storage"].StorageRoot - conf["userprovidersvc"] = revads["users"].GrpcAddress - conf["enable_home"] = true - f = owncloud.New } return f(conf) } @@ -183,7 +176,7 @@ var _ = Describe("storage providers", func() { switch provider { case "ocis": Expect(len(listRes.Infos)).To(Equal(2)) // subdir + .space - case "owncloud", "nextcloud": + case "nextcloud": Expect(len(listRes.Infos)).To(Equal(1)) // subdir default: Fail("unknown provider") @@ -288,10 +281,8 @@ var _ = Describe("storage providers", func() { Expect(err).ToNot(HaveOccurred()) // TODO: FIXME both cases should work for all providers - if provider != "owncloud" { - Expect(res.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) - } - if provider != "nextcloud" && provider != "owncloud" { + Expect(res.Status.Code).To(Equal(rpcv1beta1.Code_CODE_OK)) + if provider != "nextcloud" { Expect(res.Path).To(Equal(subdirPath)) } }) @@ -558,13 +549,6 @@ var _ = Describe("storage providers", func() { variables = map[string]string{ "enable_home": "true", } - if provider == "owncloud" { - redisAddress := os.Getenv("REDIS_ADDRESS") - if redisAddress == "" { - 
Fail("REDIS_ADDRESS not set") - } - variables["redis_address"] = redisAddress - } }) assertCreateHome(provider) @@ -637,8 +621,4 @@ var _ = Describe("storage providers", func() { "storage": "storageprovider-ocis.toml", }) - suite("owncloud", map[string]string{ - "users": "userprovider-json.toml", - "storage": "storageprovider-owncloud.toml", - }) })