diff --git a/.drone.star b/.drone.star index a8238e1d3d..dfb9275a61 100644 --- a/.drone.star +++ b/.drone.star @@ -1,5 +1,5 @@ # images -OC_CI_GOLANG = "owncloudci/golang:1.21" +OC_CI_GOLANG = "owncloudci/golang:1.22@sha256:bc1ff4ac994a432146b0424207ca89985491496fcc534156cad5ad5d9e7e216d" OC_CI_ALPINE = "owncloudci/alpine:latest" OSIXIA_OPEN_LDAP = "osixia/openldap:1.3.0" REDIS = "redis:6-alpine" @@ -103,7 +103,7 @@ def cephService(): def main(ctx): # In order to run specific parts only, specify the parts as # ocisIntegrationTests(6, [1, 4]) - this will only run 1st and 4th parts - # implemented for: ocisIntegrationTests and s3ngIntegrationTests + # implemented for: ocisIntegrationTests, posixfsIntegrationTests and s3ngIntegrationTests return [ checkStarlark(), checkGoGenerate(), @@ -117,7 +117,7 @@ def main(ctx): cs3ApiValidatorS3NG(), # virtual views don't work on edge at the moment #virtualViews(), - ] + ocisIntegrationTests(6) + s3ngIntegrationTests(12) + ] + ocisIntegrationTests(6) + s3ngIntegrationTests(12) + posixfsIntegrationTests(6) def coverage(): return { @@ -705,6 +705,83 @@ def s3ngIntegrationTests(parallelRuns, skipExceptParts = []): return pipelines +def posixfsIntegrationTests(parallelRuns, skipExceptParts = []): + pipelines = [] + debugPartsEnabled = (len(skipExceptParts) != 0) + for runPart in range(1, parallelRuns + 1): + if debugPartsEnabled and runPart not in skipExceptParts: + continue + + pipelines.append( + { + "kind": "pipeline", + "type": "docker", + "name": "posixfs-integration-tests-%s" % runPart, + "platform": { + "os": "linux", + "arch": "amd64", + }, + "trigger": { + "ref": [ + "refs/heads/master", + "refs/heads/edge", + "refs/pull/**", + ], + }, + "steps": [ + makeStep("build-ci"), + { + "name": "revad-services", + "image": OC_CI_GOLANG, + "detach": True, + "commands": [ + "cd /drone/src/tests/oc-integration-tests/drone/", + "/drone/src/cmd/revad/revad -c frontend.toml &", + "/drone/src/cmd/revad/revad -c gateway.toml &", + 
"/drone/src/cmd/revad/revad -c shares.toml &", + "/drone/src/cmd/revad/revad -c storage-shares.toml &", + "/drone/src/cmd/revad/revad -c machine-auth.toml &", + "/drone/src/cmd/revad/revad -c storage-users-posixfs.toml &", + "/drone/src/cmd/revad/revad -c storage-publiclink.toml &", + "/drone/src/cmd/revad/revad -c permissions-ocis-ci.toml &", + "/drone/src/cmd/revad/revad -c ldap-users.toml", + ], + }, + cloneApiTestReposStep(), + { + "name": "APIAcceptanceTestsPosixStorage", + "image": OC_CI_PHP, + "commands": [ + "cd /drone/src/tmp/testrunner", + "make test-acceptance-from-core-api", + ], + "environment": { + "TEST_SERVER_URL": "http://revad-services:20080", + "OCIS_REVA_DATA_ROOT": "/drone/src/tmp/reva/data/", + "DELETE_USER_DATA_CMD": "rm -rf /drone/src/tmp/reva/data/users/* /drone/src/tmp/reva/data/indexes/by-type/*", + "STORAGE_DRIVER": "ocis", + "SKELETON_DIR": "/drone/src/tmp/testing/data/apiSkeleton", + "TEST_WITH_LDAP": "true", + "REVA_LDAP_HOSTNAME": "ldap", + "TEST_REVA": "true", + "SEND_SCENARIO_LINE_REFERENCES": "true", + "BEHAT_FILTER_TAGS": "~@toImplementOnOCIS&&~comments-app-required&&~@federation-app-required&&~@notifications-app-required&&~systemtags-app-required&&~@provisioning_api-app-required&&~@preview-extension-required&&~@local_storage&&~@skipOnOcis-OCIS-Storage&&~@skipOnGraph&&~@caldav&&~@carddav&&~@skipOnReva&&~@env-config", + "DIVIDE_INTO_NUM_PARTS": parallelRuns, + "RUN_PART": runPart, + "EXPECTED_FAILURES_FILE": "/drone/src/tests/acceptance/expected-failures-on-POSIX-storage.md", + }, + }, + ], + "services": [ + redisService(), + ldapService(), + ], + "depends_on": ["unit-test-coverage"], + }, + ) + + return pipelines + def checkStarlark(): return { "kind": "pipeline", diff --git a/.mockery.yaml b/.mockery.yaml index 9c97a06f3f..839791b141 100644 --- a/.mockery.yaml +++ b/.mockery.yaml @@ -29,6 +29,9 @@ packages: github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/tree: interfaces: Blobstore: + 
github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/usermapper: + interfaces: + Mapper: github.com/cs3org/reva/v2/pkg/storage/utils/indexer: interfaces: Indexer: diff --git a/changelog/unreleased/improve-posixfs.md b/changelog/unreleased/improve-posixfs.md new file mode 100644 index 0000000000..5e920bcc5a --- /dev/null +++ b/changelog/unreleased/improve-posixfs.md @@ -0,0 +1,5 @@ +Enhancement: Improve posixfs storage driver + +Improve the posixfs storage driver by fixing several issues and adding missing features. + +https://github.com/cs3org/reva/pull/4562 diff --git a/go.mod b/go.mod index 07d83782e3..3f43f5d947 100644 --- a/go.mod +++ b/go.mod @@ -28,6 +28,7 @@ require ( github.com/go-micro/plugins/v4/events/natsjs v1.2.2-0.20231215124540-f7f8d3274bf9 github.com/go-micro/plugins/v4/server/http v1.2.2 github.com/go-micro/plugins/v4/store/nats-js v1.2.0 + github.com/go-micro/plugins/v4/store/nats-js-kv v0.0.0-20231226212146-94a49ba3e06e github.com/go-micro/plugins/v4/store/redis v1.2.1 github.com/go-playground/locales v0.14.1 github.com/go-playground/universal-translator v0.18.1 @@ -61,6 +62,7 @@ require ( github.com/onsi/ginkgo/v2 v2.15.0 github.com/onsi/gomega v1.31.1 github.com/owncloud/ocis/v2 v2.0.0 + github.com/pablodz/inotifywaitgo v0.0.6 github.com/pkg/errors v0.9.1 github.com/pkg/xattr v0.4.9 github.com/prometheus/alertmanager v0.26.0 @@ -68,6 +70,7 @@ require ( github.com/rogpeppe/go-internal v1.12.0 github.com/rs/cors v1.10.1 github.com/rs/zerolog v1.32.0 + github.com/segmentio/kafka-go v0.4.47 github.com/sethvargo/go-password v0.2.0 github.com/shamaton/msgpack/v2 v2.1.1 github.com/stretchr/testify v1.8.4 @@ -88,7 +91,7 @@ require ( golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3 golang.org/x/oauth2 v0.17.0 golang.org/x/sync v0.6.0 - golang.org/x/sys v0.17.0 + golang.org/x/sys v0.19.0 golang.org/x/term v0.17.0 golang.org/x/text v0.14.0 google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 @@ -136,7 +139,7 @@ require ( 
github.com/go-micro/plugins/v4/registry/mdns v1.2.0 // indirect github.com/go-micro/plugins/v4/registry/memory v1.2.0 // indirect github.com/go-micro/plugins/v4/registry/nats v1.2.1 // indirect - github.com/go-micro/plugins/v4/store/nats-js-kv v0.0.0-20231226212146-94a49ba3e06e // indirect + github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/errors v0.20.4 // indirect github.com/go-openapi/strfmt v0.21.7 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect @@ -185,6 +188,7 @@ require ( github.com/oklog/ulid v1.3.1 // indirect github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c // indirect github.com/patrickmn/go-cache v2.1.0+incompatible // indirect + github.com/pierrec/lz4/v4 v4.1.15 // indirect github.com/pkg/term v1.2.0-beta.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect @@ -195,14 +199,18 @@ require ( github.com/rs/xid v1.5.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sergi/go-diff v1.2.0 // indirect + github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 // indirect github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/tidwall/pretty v1.2.1 // indirect + github.com/tklauser/go-sysconf v0.3.14 // indirect + github.com/tklauser/numcpus v0.8.0 // indirect github.com/urfave/cli/v2 v2.25.7 // indirect github.com/xanzy/ssh-agent v0.3.2 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect go.etcd.io/etcd/api/v3 v3.5.12 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.12 // indirect go.mongodb.org/mongo-driver v1.11.3 // indirect diff --git a/go.sum b/go.sum index 5377ff87be..9c1f23fdda 100644 --- a/go.sum +++ b/go.sum @@ -1040,6 +1040,8 @@ 
github.com/go-micro/plugins/v4/store/nats-js-kv v0.0.0-20231226212146-94a49ba3e0 github.com/go-micro/plugins/v4/store/nats-js-kv v0.0.0-20231226212146-94a49ba3e06e/go.mod h1:Goi4eJ9SrKkxE6NsAVqBVNxfQFbwb7UbyII6743ldgM= github.com/go-micro/plugins/v4/store/redis v1.2.1 h1:d9kwr9bSpoK9vkHkqcv+isQUbgBCHpfwCV57pcAPS6c= github.com/go-micro/plugins/v4/store/redis v1.2.1/go.mod h1:MbCG0YiyPqETTtm7uHFmxQNCaW1o9hBoYtFwhbVjLUg= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= @@ -1328,8 +1330,6 @@ github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02 github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= -github.com/kobergj/plugins/v4/store/nats-js-kv v0.0.0-20231207143248-4d424e3ae348 h1:Czv6AW9Suj6npWd5BLZjobdD78c2RdzBeKBgkq3jYZk= -github.com/kobergj/plugins/v4/store/nats-js-kv v0.0.0-20231207143248-4d424e3ae348/go.mod h1:Goi4eJ9SrKkxE6NsAVqBVNxfQFbwb7UbyII6743ldgM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= @@ -1467,6 +1467,8 @@ github.com/owncloud/ocis/v2 v2.0.0 h1:eHmUpW73dAT0X+JXRStYRzHt9gBUGlysnLg3vjJzac github.com/owncloud/ocis/v2 v2.0.0/go.mod h1:qH016gkfh/PNOv+xfiwD2weWY99nZTTghKhgajshYYk= 
github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c h1:rp5dCmg/yLR3mgFuSOe4oEnDDmGLROTvMragMUXpTQw= github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c/go.mod h1:X07ZCGwUbLaax7L0S3Tw4hpejzu63ZrrQiUe6W0hcy0= +github.com/pablodz/inotifywaitgo v0.0.6 h1:BTjQfnixXwG7oYmlIiyhWA6iyO9BtxatB3YgiibOTFc= +github.com/pablodz/inotifywaitgo v0.0.6/go.mod h1:OtzRCsYTJlIr+vAzlOtauTkfQ1c25ebFuXq8tbbf8cw= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -1475,6 +1477,7 @@ github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTK github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1560,6 +1563,8 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0= +github.com/segmentio/kafka-go 
v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= @@ -1569,6 +1574,8 @@ github.com/sethvargo/go-password v0.2.0 h1:BTDl4CC/gjf/axHMaDQtw507ogrXLci6XRiLc github.com/sethvargo/go-password v0.2.0/go.mod h1:Ym4Mr9JXLBycr02MFuVQ/0JHidNetSgbzutTr3zsYXE= github.com/shamaton/msgpack/v2 v2.1.1 h1:gAMxOtVJz93R0EwewwUc8tx30n34aV6BzJuwHE8ogAk= github.com/shamaton/msgpack/v2 v2.1.1/go.mod h1:aTUEmh31ziGX1Ml7wMPLVY0f4vT3CRsCvZRoSCs+VGg= +github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 h1:pXY9qYc/MP5zdvqWEUH6SjNiu7VhSjuVFTFiTcphaLU= @@ -1612,6 +1619,10 @@ github.com/thanhpk/randstr v1.0.6/go.mod h1:M/H2P1eNLZzlDwAzpkkkUvoyNNMbzRGhESZu github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= +github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= +github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= github.com/tus/tusd v1.1.0/go.mod 
h1:3DWPOdeCnjBwKtv98y5dSws3itPqfce5TVa0s59LRiA= github.com/tus/tusd v1.13.0 h1:W7rtb1XPSpde/GPZAgdfUS3vus2Jt2KmckS6OUd3CU8= github.com/tus/tusd v1.13.0/go.mod h1:1tX4CDGlx8koHGFJdSaJ5ybUIm2NeVloJgZEPSKRcQA= @@ -1625,9 +1636,14 @@ github.com/wk8/go-ordered-map v1.0.0/go.mod h1:9ZIbRunKbuvfPKyBP1SIKLcXNlv74YCOZ github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= github.com/xanzy/ssh-agent v0.3.2 h1:eKj4SX2Fe7mui28ZgnFW5fmTz1EIr7ugo5s6wDxdHBM= github.com/xanzy/ssh-agent v0.3.2/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xhit/go-str2duration v1.2.0/go.mod h1:3cPSlfZlUHVlneIVfePFWcJZsuwf+P1v2SRTV4cUmp4= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= @@ -1639,6 +1655,8 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.4 
h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go-micro.dev/v4 v4.10.2 h1:GWQf1+FcAiMf1yca3P09RNjB31Xtk0C5HiKHSpq/2qA= @@ -1717,6 +1735,7 @@ golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45 golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1855,6 +1874,7 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2031,8 +2051,11 @@ golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2047,6 +2070,7 @@ golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/internal/grpc/services/storageprovider/storageprovider.go b/internal/grpc/services/storageprovider/storageprovider.go index 2a9982d2e8..d6b8fe1784 100644 --- a/internal/grpc/services/storageprovider/storageprovider.go +++ b/internal/grpc/services/storageprovider/storageprovider.go @@ -101,20 +101,20 @@ func (c *config) init() { } } -type service struct { +type Service struct { conf *config - storage storage.FS + Storage storage.FS dataServerURL *url.URL availableXS 
[]*provider.ResourceChecksumPriority } -func (s *service) Close() error { - return s.storage.Shutdown(context.Background()) +func (s *Service) Close() error { + return s.Storage.Shutdown(context.Background()) } -func (s *service) UnprotectedEndpoints() []string { return []string{} } +func (s *Service) UnprotectedEndpoints() []string { return []string{} } -func (s *service) Register(ss *grpc.Server) { +func (s *Service) Register(ss *grpc.Server) { provider.RegisterProviderAPIServer(ss, s) } @@ -199,9 +199,9 @@ func New(m map[string]interface{}, ss *grpc.Server) (rgrpc.Service, error) { return nil, err } - service := &service{ + service := &Service{ conf: c, - storage: fs, + Storage: fs, dataServerURL: u, availableXS: xsTypes, } @@ -209,20 +209,20 @@ func New(m map[string]interface{}, ss *grpc.Server) (rgrpc.Service, error) { return service, nil } -func (s *service) SetArbitraryMetadata(ctx context.Context, req *provider.SetArbitraryMetadataRequest) (*provider.SetArbitraryMetadataResponse, error) { +func (s *Service) SetArbitraryMetadata(ctx context.Context, req *provider.SetArbitraryMetadataRequest) (*provider.SetArbitraryMetadataResponse, error) { ctx = ctxpkg.ContextSetLockID(ctx, req.LockId) - err := s.storage.SetArbitraryMetadata(ctx, req.Ref, req.ArbitraryMetadata) + err := s.Storage.SetArbitraryMetadata(ctx, req.Ref, req.ArbitraryMetadata) return &provider.SetArbitraryMetadataResponse{ Status: status.NewStatusFromErrType(ctx, "set arbitrary metadata", err), }, nil } -func (s *service) UnsetArbitraryMetadata(ctx context.Context, req *provider.UnsetArbitraryMetadataRequest) (*provider.UnsetArbitraryMetadataResponse, error) { +func (s *Service) UnsetArbitraryMetadata(ctx context.Context, req *provider.UnsetArbitraryMetadataRequest) (*provider.UnsetArbitraryMetadataResponse, error) { ctx = ctxpkg.ContextSetLockID(ctx, req.LockId) - err := s.storage.UnsetArbitraryMetadata(ctx, req.Ref, req.ArbitraryMetadataKeys) + err := s.Storage.UnsetArbitraryMetadata(ctx, 
req.Ref, req.ArbitraryMetadataKeys) return &provider.UnsetArbitraryMetadataResponse{ Status: status.NewStatusFromErrType(ctx, "unset arbitrary metadata", err), @@ -230,13 +230,13 @@ func (s *service) UnsetArbitraryMetadata(ctx context.Context, req *provider.Unse } // SetLock puts a lock on the given reference -func (s *service) SetLock(ctx context.Context, req *provider.SetLockRequest) (*provider.SetLockResponse, error) { +func (s *Service) SetLock(ctx context.Context, req *provider.SetLockRequest) (*provider.SetLockResponse, error) { if !canLockPublicShare(ctx) { return &provider.SetLockResponse{ Status: status.NewPermissionDenied(ctx, nil, "no permission to lock the share"), }, nil } - err := s.storage.SetLock(ctx, req.Ref, req.Lock) + err := s.Storage.SetLock(ctx, req.Ref, req.Lock) return &provider.SetLockResponse{ Status: status.NewStatusFromErrType(ctx, "set lock", err), @@ -244,8 +244,8 @@ func (s *service) SetLock(ctx context.Context, req *provider.SetLockRequest) (*p } // GetLock returns an existing lock on the given reference -func (s *service) GetLock(ctx context.Context, req *provider.GetLockRequest) (*provider.GetLockResponse, error) { - lock, err := s.storage.GetLock(ctx, req.Ref) +func (s *Service) GetLock(ctx context.Context, req *provider.GetLockRequest) (*provider.GetLockResponse, error) { + lock, err := s.Storage.GetLock(ctx, req.Ref) return &provider.GetLockResponse{ Status: status.NewStatusFromErrType(ctx, "get lock", err), @@ -254,14 +254,14 @@ func (s *service) GetLock(ctx context.Context, req *provider.GetLockRequest) (*p } // RefreshLock refreshes an existing lock on the given reference -func (s *service) RefreshLock(ctx context.Context, req *provider.RefreshLockRequest) (*provider.RefreshLockResponse, error) { +func (s *Service) RefreshLock(ctx context.Context, req *provider.RefreshLockRequest) (*provider.RefreshLockResponse, error) { if !canLockPublicShare(ctx) { return &provider.RefreshLockResponse{ Status: 
status.NewPermissionDenied(ctx, nil, "no permission to refresh the share lock"), }, nil } - err := s.storage.RefreshLock(ctx, req.Ref, req.Lock, req.ExistingLockId) + err := s.Storage.RefreshLock(ctx, req.Ref, req.Lock, req.ExistingLockId) return &provider.RefreshLockResponse{ Status: status.NewStatusFromErrType(ctx, "refresh lock", err), @@ -269,21 +269,21 @@ func (s *service) RefreshLock(ctx context.Context, req *provider.RefreshLockRequ } // Unlock removes an existing lock from the given reference -func (s *service) Unlock(ctx context.Context, req *provider.UnlockRequest) (*provider.UnlockResponse, error) { +func (s *Service) Unlock(ctx context.Context, req *provider.UnlockRequest) (*provider.UnlockResponse, error) { if !canLockPublicShare(ctx) { return &provider.UnlockResponse{ Status: status.NewPermissionDenied(ctx, nil, "no permission to unlock the share"), }, nil } - err := s.storage.Unlock(ctx, req.Ref, req.Lock) + err := s.Storage.Unlock(ctx, req.Ref, req.Lock) return &provider.UnlockResponse{ Status: status.NewStatusFromErrType(ctx, "unlock", err), }, nil } -func (s *service) InitiateFileDownload(ctx context.Context, req *provider.InitiateFileDownloadRequest) (*provider.InitiateFileDownloadResponse, error) { +func (s *Service) InitiateFileDownload(ctx context.Context, req *provider.InitiateFileDownloadRequest) (*provider.InitiateFileDownloadResponse, error) { // TODO(labkode): maybe add some checks before download starts? eg. check permissions? // TODO(labkode): maybe add short-lived token? // We now simply point the client to the data server. 
@@ -329,7 +329,7 @@ func validateIfUnmodifiedSince(ifUnmodifiedSince *typesv1beta1.Timestamp, info * } } -func (s *service) InitiateFileUpload(ctx context.Context, req *provider.InitiateFileUploadRequest) (*provider.InitiateFileUploadResponse, error) { +func (s *Service) InitiateFileUpload(ctx context.Context, req *provider.InitiateFileUploadRequest) (*provider.InitiateFileUploadResponse, error) { // TODO(labkode): same considerations as download log := appctx.GetLogger(ctx) if req.Ref.GetPath() == "/" { @@ -412,7 +412,7 @@ func (s *service) InitiateFileUpload(ctx context.Context, req *provider.Initiate metadata["expires"] = strconv.Itoa(int(expirationTimestamp.Seconds)) } - uploadIDs, err := s.storage.InitiateUpload(ctx, req.Ref, uploadLength, metadata) + uploadIDs, err := s.Storage.InitiateUpload(ctx, req.Ref, uploadLength, metadata) if err != nil { var st *rpc.Status switch err.(type) { @@ -477,9 +477,9 @@ func (s *service) InitiateFileUpload(ctx context.Context, req *provider.Initiate return res, nil } -func (s *service) GetPath(ctx context.Context, req *provider.GetPathRequest) (*provider.GetPathResponse, error) { +func (s *Service) GetPath(ctx context.Context, req *provider.GetPathRequest) (*provider.GetPathResponse, error) { // TODO(labkode): check that the storage ID is the same as the storage provider id. 
- fn, err := s.storage.GetPathByID(ctx, req.ResourceId) + fn, err := s.Storage.GetPathByID(ctx, req.ResourceId) if err != nil { return &provider.GetPathResponse{ Status: status.NewStatusFromErrType(ctx, "get path", err), @@ -492,17 +492,17 @@ func (s *service) GetPath(ctx context.Context, req *provider.GetPathRequest) (*p return res, nil } -func (s *service) GetHome(ctx context.Context, req *provider.GetHomeRequest) (*provider.GetHomeResponse, error) { +func (s *Service) GetHome(ctx context.Context, req *provider.GetHomeRequest) (*provider.GetHomeResponse, error) { return nil, errtypes.NotSupported("unused, use the gateway to look up the user home") } -func (s *service) CreateHome(ctx context.Context, req *provider.CreateHomeRequest) (*provider.CreateHomeResponse, error) { +func (s *Service) CreateHome(ctx context.Context, req *provider.CreateHomeRequest) (*provider.CreateHomeResponse, error) { return nil, errtypes.NotSupported("use CreateStorageSpace with type personal") } // CreateStorageSpace creates a storage space -func (s *service) CreateStorageSpace(ctx context.Context, req *provider.CreateStorageSpaceRequest) (*provider.CreateStorageSpaceResponse, error) { - resp, err := s.storage.CreateStorageSpace(ctx, req) +func (s *Service) CreateStorageSpace(ctx context.Context, req *provider.CreateStorageSpaceRequest) (*provider.CreateStorageSpaceResponse, error) { + resp, err := s.Storage.CreateStorageSpace(ctx, req) if err != nil { var st *rpc.Status switch err.(type) { @@ -513,7 +513,7 @@ func (s *service) CreateStorageSpace(ctx context.Context, req *provider.CreateSt case errtypes.NotSupported: // if trying to create a user home fall back to CreateHome if u, ok := ctxpkg.ContextGetUser(ctx); ok && req.Type == "personal" && utils.UserEqual(req.GetOwner().Id, u.Id) { - if err := s.storage.CreateHome(ctx); err != nil { + if err := s.Storage.CreateHome(ctx); err != nil { st = status.NewInternal(ctx, "error creating home") } else { st = status.NewOK(ctx) @@ -544,7 
+544,7 @@ func (s *service) CreateStorageSpace(ctx context.Context, req *provider.CreateSt return resp, nil } -func (s *service) ListStorageSpaces(ctx context.Context, req *provider.ListStorageSpacesRequest) (*provider.ListStorageSpacesResponse, error) { +func (s *Service) ListStorageSpaces(ctx context.Context, req *provider.ListStorageSpacesRequest) (*provider.ListStorageSpacesResponse, error) { log := appctx.GetLogger(ctx) // TODO this is just temporary. Update the API to include this flag. @@ -555,7 +555,7 @@ func (s *service) ListStorageSpaces(ctx context.Context, req *provider.ListStora } } - spaces, err := s.storage.ListStorageSpaces(ctx, req.Filters, unrestricted) + spaces, err := s.Storage.ListStorageSpaces(ctx, req.Filters, unrestricted) if err != nil { var st *rpc.Status switch err.(type) { @@ -593,8 +593,8 @@ func (s *service) ListStorageSpaces(ctx context.Context, req *provider.ListStora }, nil } -func (s *service) UpdateStorageSpace(ctx context.Context, req *provider.UpdateStorageSpaceRequest) (*provider.UpdateStorageSpaceResponse, error) { - res, err := s.storage.UpdateStorageSpace(ctx, req) +func (s *Service) UpdateStorageSpace(ctx context.Context, req *provider.UpdateStorageSpaceRequest) (*provider.UpdateStorageSpaceResponse, error) { + res, err := s.Storage.UpdateStorageSpace(ctx, req) if err != nil { appctx.GetLogger(ctx). Error(). @@ -607,14 +607,14 @@ func (s *service) UpdateStorageSpace(ctx context.Context, req *provider.UpdateSt return res, nil } -func (s *service) DeleteStorageSpace(ctx context.Context, req *provider.DeleteStorageSpaceRequest) (*provider.DeleteStorageSpaceResponse, error) { +func (s *Service) DeleteStorageSpace(ctx context.Context, req *provider.DeleteStorageSpaceRequest) (*provider.DeleteStorageSpaceResponse, error) { // we need to get the space before so we can return critical information // FIXME: why is this string parsing necessary? 
idraw, _ := storagespace.ParseID(req.Id.GetOpaqueId()) idraw.OpaqueId = idraw.GetSpaceId() id := &provider.StorageSpaceId{OpaqueId: storagespace.FormatResourceID(idraw)} - spaces, err := s.storage.ListStorageSpaces(ctx, []*provider.ListStorageSpacesRequest_Filter{{Type: provider.ListStorageSpacesRequest_Filter_TYPE_ID, Term: &provider.ListStorageSpacesRequest_Filter_Id{Id: id}}}, true) + spaces, err := s.Storage.ListStorageSpaces(ctx, []*provider.ListStorageSpacesRequest_Filter{{Type: provider.ListStorageSpacesRequest_Filter_TYPE_ID, Term: &provider.ListStorageSpacesRequest_Filter_Id{Id: id}}}, true) if err != nil { var st *rpc.Status switch err.(type) { @@ -636,7 +636,7 @@ func (s *service) DeleteStorageSpace(ctx context.Context, req *provider.DeleteSt }, nil } - if err := s.storage.DeleteStorageSpace(ctx, req); err != nil { + if err := s.Storage.DeleteStorageSpace(ctx, req); err != nil { var st *rpc.Status switch err.(type) { case errtypes.IsNotFound: @@ -670,7 +670,7 @@ func (s *service) DeleteStorageSpace(ctx context.Context, req *provider.DeleteSt return res, nil } -func (s *service) CreateContainer(ctx context.Context, req *provider.CreateContainerRequest) (*provider.CreateContainerResponse, error) { +func (s *Service) CreateContainer(ctx context.Context, req *provider.CreateContainerRequest) (*provider.CreateContainerResponse, error) { // FIXME these should be part of the CreateContainerRequest object if req.Opaque != nil { if e, ok := req.Opaque.Map["lockid"]; ok && e.Decoder == "plain" { @@ -678,14 +678,14 @@ func (s *service) CreateContainer(ctx context.Context, req *provider.CreateConta } } - err := s.storage.CreateDir(ctx, req.Ref) + err := s.Storage.CreateDir(ctx, req.Ref) return &provider.CreateContainerResponse{ Status: status.NewStatusFromErrType(ctx, "create container", err), }, nil } -func (s *service) TouchFile(ctx context.Context, req *provider.TouchFileRequest) (*provider.TouchFileResponse, error) { +func (s *Service) TouchFile(ctx 
context.Context, req *provider.TouchFileRequest) (*provider.TouchFileResponse, error) { // FIXME these should be part of the TouchFileRequest object var mtime string if req.Opaque != nil { @@ -695,14 +695,14 @@ func (s *service) TouchFile(ctx context.Context, req *provider.TouchFileRequest) mtime = utils.ReadPlainFromOpaque(req.Opaque, "X-OC-Mtime") } - err := s.storage.TouchFile(ctx, req.Ref, utils.ExistsInOpaque(req.Opaque, "markprocessing"), mtime) + err := s.Storage.TouchFile(ctx, req.Ref, utils.ExistsInOpaque(req.Opaque, "markprocessing"), mtime) return &provider.TouchFileResponse{ Status: status.NewStatusFromErrType(ctx, "touch file", err), }, nil } -func (s *service) Delete(ctx context.Context, req *provider.DeleteRequest) (*provider.DeleteResponse, error) { +func (s *Service) Delete(ctx context.Context, req *provider.DeleteRequest) (*provider.DeleteResponse, error) { if req.Ref.GetPath() == "/" { return &provider.DeleteResponse{ Status: status.NewInternal(ctx, "can't delete mount path"), @@ -720,7 +720,7 @@ func (s *service) Delete(ctx context.Context, req *provider.DeleteRequest) (*pro } } - md, err := s.storage.GetMD(ctx, req.Ref, []string{}, []string{"id", "status"}) + md, err := s.Storage.GetMD(ctx, req.Ref, []string{}, []string{"id", "status"}) if err != nil { return &provider.DeleteResponse{ Status: status.NewStatusFromErrType(ctx, "can't stat resource to delete", err), @@ -741,7 +741,7 @@ func (s *service) Delete(ctx context.Context, req *provider.DeleteRequest) (*pro }, nil } - err = s.storage.Delete(ctx, req.Ref) + err = s.Storage.Delete(ctx, req.Ref) return &provider.DeleteResponse{ Status: status.NewStatusFromErrType(ctx, "delete", err), @@ -753,17 +753,17 @@ func (s *service) Delete(ctx context.Context, req *provider.DeleteRequest) (*pro }, nil } -func (s *service) Move(ctx context.Context, req *provider.MoveRequest) (*provider.MoveResponse, error) { +func (s *Service) Move(ctx context.Context, req *provider.MoveRequest) (*provider.MoveResponse, 
error) { ctx = ctxpkg.ContextSetLockID(ctx, req.LockId) - err := s.storage.Move(ctx, req.Source, req.Destination) + err := s.Storage.Move(ctx, req.Source, req.Destination) return &provider.MoveResponse{ Status: status.NewStatusFromErrType(ctx, "move", err), }, nil } -func (s *service) Stat(ctx context.Context, req *provider.StatRequest) (*provider.StatResponse, error) { +func (s *Service) Stat(ctx context.Context, req *provider.StatRequest) (*provider.StatResponse, error) { ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "stat") defer span.End() @@ -772,7 +772,7 @@ func (s *service) Stat(ctx context.Context, req *provider.StatRequest) (*provide Value: attribute.StringValue(req.GetRef().String()), }) - md, err := s.storage.GetMD(ctx, req.GetRef(), req.GetArbitraryMetadataKeys(), req.GetFieldMask().GetPaths()) + md, err := s.Storage.GetMD(ctx, req.GetRef(), req.GetArbitraryMetadataKeys(), req.GetFieldMask().GetPaths()) if err != nil { return &provider.StatResponse{ Status: status.NewStatusFromErrType(ctx, "stat", err), @@ -789,11 +789,11 @@ func (s *service) Stat(ctx context.Context, req *provider.StatRequest) (*provide }, nil } -func (s *service) ListContainerStream(req *provider.ListContainerStreamRequest, ss provider.ProviderAPI_ListContainerStreamServer) error { +func (s *Service) ListContainerStream(req *provider.ListContainerStreamRequest, ss provider.ProviderAPI_ListContainerStreamServer) error { ctx := ss.Context() log := appctx.GetLogger(ctx) - mds, err := s.storage.ListFolder(ctx, req.GetRef(), req.GetArbitraryMetadataKeys(), req.GetFieldMask().GetPaths()) + mds, err := s.Storage.ListFolder(ctx, req.GetRef(), req.GetArbitraryMetadataKeys(), req.GetFieldMask().GetPaths()) if err != nil { var st *rpc.Status switch err.(type) { @@ -836,8 +836,8 @@ func (s *service) ListContainerStream(req *provider.ListContainerStreamRequest, return nil } -func (s *service) ListContainer(ctx context.Context, req *provider.ListContainerRequest) 
(*provider.ListContainerResponse, error) { - mds, err := s.storage.ListFolder(ctx, req.GetRef(), req.GetArbitraryMetadataKeys(), req.GetFieldMask().GetPaths()) +func (s *Service) ListContainer(ctx context.Context, req *provider.ListContainerRequest) (*provider.ListContainerResponse, error) { + mds, err := s.Storage.ListFolder(ctx, req.GetRef(), req.GetArbitraryMetadataKeys(), req.GetFieldMask().GetPaths()) res := &provider.ListContainerResponse{ Status: status.NewStatusFromErrType(ctx, "list container", err), Infos: mds, @@ -854,8 +854,8 @@ func (s *service) ListContainer(ctx context.Context, req *provider.ListContainer return res, nil } -func (s *service) ListFileVersions(ctx context.Context, req *provider.ListFileVersionsRequest) (*provider.ListFileVersionsResponse, error) { - revs, err := s.storage.ListRevisions(ctx, req.Ref) +func (s *Service) ListFileVersions(ctx context.Context, req *provider.ListFileVersionsRequest) (*provider.ListFileVersionsResponse, error) { + revs, err := s.Storage.ListRevisions(ctx, req.Ref) sort.Sort(descendingMtime(revs)) @@ -865,22 +865,22 @@ func (s *service) ListFileVersions(ctx context.Context, req *provider.ListFileVe }, nil } -func (s *service) RestoreFileVersion(ctx context.Context, req *provider.RestoreFileVersionRequest) (*provider.RestoreFileVersionResponse, error) { +func (s *Service) RestoreFileVersion(ctx context.Context, req *provider.RestoreFileVersionRequest) (*provider.RestoreFileVersionResponse, error) { ctx = ctxpkg.ContextSetLockID(ctx, req.LockId) - err := s.storage.RestoreRevision(ctx, req.Ref, req.Key) + err := s.Storage.RestoreRevision(ctx, req.Ref, req.Key) return &provider.RestoreFileVersionResponse{ Status: status.NewStatusFromErrType(ctx, "restore file version", err), }, nil } -func (s *service) ListRecycleStream(req *provider.ListRecycleStreamRequest, ss provider.ProviderAPI_ListRecycleStreamServer) error { +func (s *Service) ListRecycleStream(req *provider.ListRecycleStreamRequest, ss 
provider.ProviderAPI_ListRecycleStreamServer) error { ctx := ss.Context() log := appctx.GetLogger(ctx) key, itemPath := router.ShiftPath(req.Key) - items, err := s.storage.ListRecycle(ctx, req.Ref, key, itemPath) + items, err := s.Storage.ListRecycle(ctx, req.Ref, key, itemPath) if err != nil { var st *rpc.Status switch err.(type) { @@ -922,9 +922,9 @@ func (s *service) ListRecycleStream(req *provider.ListRecycleStreamRequest, ss p return nil } -func (s *service) ListRecycle(ctx context.Context, req *provider.ListRecycleRequest) (*provider.ListRecycleResponse, error) { +func (s *Service) ListRecycle(ctx context.Context, req *provider.ListRecycleRequest) (*provider.ListRecycleResponse, error) { key, itemPath := router.ShiftPath(req.Key) - items, err := s.storage.ListRecycle(ctx, req.Ref, key, itemPath) + items, err := s.Storage.ListRecycle(ctx, req.Ref, key, itemPath) if err != nil { var st *rpc.Status switch err.(type) { @@ -957,12 +957,12 @@ func (s *service) ListRecycle(ctx context.Context, req *provider.ListRecycleRequ return res, nil } -func (s *service) RestoreRecycleItem(ctx context.Context, req *provider.RestoreRecycleItemRequest) (*provider.RestoreRecycleItemResponse, error) { +func (s *Service) RestoreRecycleItem(ctx context.Context, req *provider.RestoreRecycleItemRequest) (*provider.RestoreRecycleItemResponse, error) { ctx = ctxpkg.ContextSetLockID(ctx, req.LockId) // TODO(labkode): CRITICAL: fill recycle info with storage provider. 
key, itemPath := router.ShiftPath(req.Key) - err := s.storage.RestoreRecycleItem(ctx, req.Ref, key, itemPath, req.RestoreRef) + err := s.Storage.RestoreRecycleItem(ctx, req.Ref, key, itemPath, req.RestoreRef) res := &provider.RestoreRecycleItemResponse{ Status: status.NewStatusFromErrType(ctx, "restore recycle item", err), @@ -970,7 +970,7 @@ func (s *service) RestoreRecycleItem(ctx context.Context, req *provider.RestoreR return res, nil } -func (s *service) PurgeRecycle(ctx context.Context, req *provider.PurgeRecycleRequest) (*provider.PurgeRecycleResponse, error) { +func (s *Service) PurgeRecycle(ctx context.Context, req *provider.PurgeRecycleRequest) (*provider.PurgeRecycleResponse, error) { // FIXME these should be part of the PurgeRecycleRequest object if req.Opaque != nil { if e, ok := req.Opaque.Map["lockid"]; ok && e.Decoder == "plain" { @@ -981,7 +981,7 @@ func (s *service) PurgeRecycle(ctx context.Context, req *provider.PurgeRecycleRe // if a key was sent as opaque id purge only that item key, itemPath := router.ShiftPath(req.Key) if key != "" { - if err := s.storage.PurgeRecycleItem(ctx, req.Ref, key, itemPath); err != nil { + if err := s.Storage.PurgeRecycleItem(ctx, req.Ref, key, itemPath); err != nil { st := status.NewStatusFromErrType(ctx, "error purging recycle item", err) appctx.GetLogger(ctx). Error(). @@ -994,7 +994,7 @@ func (s *service) PurgeRecycle(ctx context.Context, req *provider.PurgeRecycleRe Status: st, }, nil } - } else if err := s.storage.EmptyRecycle(ctx, req.Ref); err != nil { + } else if err := s.Storage.EmptyRecycle(ctx, req.Ref); err != nil { // otherwise try emptying the whole recycle bin st := status.NewStatusFromErrType(ctx, "error emptying recycle", err) appctx.GetLogger(ctx). 
@@ -1015,8 +1015,8 @@ func (s *service) PurgeRecycle(ctx context.Context, req *provider.PurgeRecycleRe return res, nil } -func (s *service) ListGrants(ctx context.Context, req *provider.ListGrantsRequest) (*provider.ListGrantsResponse, error) { - grants, err := s.storage.ListGrants(ctx, req.Ref) +func (s *Service) ListGrants(ctx context.Context, req *provider.ListGrantsRequest) (*provider.ListGrantsResponse, error) { + grants, err := s.Storage.ListGrants(ctx, req.Ref) if err != nil { var st *rpc.Status switch err.(type) { @@ -1045,7 +1045,7 @@ func (s *service) ListGrants(ctx context.Context, req *provider.ListGrantsReques return res, nil } -func (s *service) DenyGrant(ctx context.Context, req *provider.DenyGrantRequest) (*provider.DenyGrantResponse, error) { +func (s *Service) DenyGrant(ctx context.Context, req *provider.DenyGrantRequest) (*provider.DenyGrantResponse, error) { // check grantee type is valid if req.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_INVALID { return &provider.DenyGrantResponse{ @@ -1053,7 +1053,7 @@ func (s *service) DenyGrant(ctx context.Context, req *provider.DenyGrantRequest) }, nil } - err := s.storage.DenyGrant(ctx, req.Ref, req.Grantee) + err := s.Storage.DenyGrant(ctx, req.Ref, req.Grantee) if err != nil { var st *rpc.Status switch err.(type) { @@ -1086,7 +1086,7 @@ func (s *service) DenyGrant(ctx context.Context, req *provider.DenyGrantRequest) return res, nil } -func (s *service) AddGrant(ctx context.Context, req *provider.AddGrantRequest) (*provider.AddGrantResponse, error) { +func (s *Service) AddGrant(ctx context.Context, req *provider.AddGrantRequest) (*provider.AddGrantResponse, error) { ctx = ctxpkg.ContextSetLockID(ctx, req.LockId) // TODO: update CS3 APIs @@ -1109,14 +1109,14 @@ func (s *service) AddGrant(ctx context.Context, req *provider.AddGrantRequest) ( }, nil } - err := s.storage.AddGrant(ctx, req.Ref, req.Grant) + err := s.Storage.AddGrant(ctx, req.Ref, req.Grant) return &provider.AddGrantResponse{ Status: 
status.NewStatusFromErrType(ctx, "add grant", err), }, nil } -func (s *service) UpdateGrant(ctx context.Context, req *provider.UpdateGrantRequest) (*provider.UpdateGrantResponse, error) { +func (s *Service) UpdateGrant(ctx context.Context, req *provider.UpdateGrantRequest) (*provider.UpdateGrantResponse, error) { // FIXME these should be part of the UpdateGrantRequest object if req.Opaque != nil { if e, ok := req.Opaque.Map["lockid"]; ok && e.Decoder == "plain" { @@ -1144,14 +1144,14 @@ func (s *service) UpdateGrant(ctx context.Context, req *provider.UpdateGrantRequ }, nil } - err := s.storage.UpdateGrant(ctx, req.Ref, req.Grant) + err := s.Storage.UpdateGrant(ctx, req.Ref, req.Grant) return &provider.UpdateGrantResponse{ Status: status.NewStatusFromErrType(ctx, "update grant", err), }, nil } -func (s *service) RemoveGrant(ctx context.Context, req *provider.RemoveGrantRequest) (*provider.RemoveGrantResponse, error) { +func (s *Service) RemoveGrant(ctx context.Context, req *provider.RemoveGrantRequest) (*provider.RemoveGrantResponse, error) { ctx = ctxpkg.ContextSetLockID(ctx, req.LockId) // check targetType is valid @@ -1168,14 +1168,14 @@ func (s *service) RemoveGrant(ctx context.Context, req *provider.RemoveGrantRequ ctx = context.WithValue(ctx, utils.SpaceGrant, struct{}{}) } - err := s.storage.RemoveGrant(ctx, req.Ref, req.Grant) + err := s.Storage.RemoveGrant(ctx, req.Ref, req.Grant) return &provider.RemoveGrantResponse{ Status: status.NewStatusFromErrType(ctx, "remove grant", err), }, nil } -func (s *service) CreateReference(ctx context.Context, req *provider.CreateReferenceRequest) (*provider.CreateReferenceResponse, error) { +func (s *Service) CreateReference(ctx context.Context, req *provider.CreateReferenceRequest) (*provider.CreateReferenceResponse, error) { log := appctx.GetLogger(ctx) // parse uri is valid @@ -1187,7 +1187,7 @@ func (s *service) CreateReference(ctx context.Context, req *provider.CreateRefer }, nil } - if err := 
s.storage.CreateReference(ctx, req.Ref.GetPath(), u); err != nil { + if err := s.Storage.CreateReference(ctx, req.Ref.GetPath(), u); err != nil { var st *rpc.Status switch err.(type) { case errtypes.IsNotFound: @@ -1212,14 +1212,14 @@ func (s *service) CreateReference(ctx context.Context, req *provider.CreateRefer }, nil } -func (s *service) CreateSymlink(ctx context.Context, req *provider.CreateSymlinkRequest) (*provider.CreateSymlinkResponse, error) { +func (s *Service) CreateSymlink(ctx context.Context, req *provider.CreateSymlinkRequest) (*provider.CreateSymlinkResponse, error) { return &provider.CreateSymlinkResponse{ Status: status.NewUnimplemented(ctx, errtypes.NotSupported("CreateSymlink not implemented"), "CreateSymlink not implemented"), }, nil } -func (s *service) GetQuota(ctx context.Context, req *provider.GetQuotaRequest) (*provider.GetQuotaResponse, error) { - total, used, remaining, err := s.storage.GetQuota(ctx, req.Ref) +func (s *Service) GetQuota(ctx context.Context, req *provider.GetQuotaRequest) (*provider.GetQuotaResponse, error) { + total, used, remaining, err := s.Storage.GetQuota(ctx, req.Ref) if err != nil { var st *rpc.Status switch err.(type) { @@ -1257,7 +1257,7 @@ func (s *service) GetQuota(ctx context.Context, req *provider.GetQuotaRequest) ( return res, nil } -func (s *service) addMissingStorageProviderID(resourceID *provider.ResourceId, spaceID *provider.StorageSpaceId) { +func (s *Service) addMissingStorageProviderID(resourceID *provider.ResourceId, spaceID *provider.StorageSpaceId) { // The storage driver might set the mount ID by itself, in which case skip this step if resourceID != nil && resourceID.GetStorageId() == "" { resourceID.StorageId = s.conf.MountID diff --git a/pkg/rhttp/datatx/manager/tus/tus.go b/pkg/rhttp/datatx/manager/tus/tus.go index f86b100866..d60fa1e686 100644 --- a/pkg/rhttp/datatx/manager/tus/tus.go +++ b/pkg/rhttp/datatx/manager/tus/tus.go @@ -82,7 +82,7 @@ func New(m map[string]interface{}, publisher 
events.Publisher) (datatx.DataTX, e } func (m *manager) Handler(fs storage.FS) (http.Handler, error) { - composable, ok := fs.(composable) + composable, ok := fs.(storage.ComposableFS) if !ok { return nil, errtypes.NotSupported("file system does not support the tus protocol") } @@ -193,12 +193,6 @@ func (m *manager) Handler(fs storage.FS) (http.Handler, error) { return h, nil } -// Composable is the interface that a struct needs to implement -// to be composable, so that it can support the TUS methods -type composable interface { - UseIn(composer *tusd.StoreComposer) -} - func setHeaders(fs storage.FS, w http.ResponseWriter, r *http.Request) { ctx := r.Context() id := path.Base(r.URL.Path) diff --git a/pkg/storage/fs/loader/loader.go b/pkg/storage/fs/loader/loader.go index 71475e38a9..68575752c9 100644 --- a/pkg/storage/fs/loader/loader.go +++ b/pkg/storage/fs/loader/loader.go @@ -31,7 +31,6 @@ import ( _ "github.com/cs3org/reva/v2/pkg/storage/fs/nextcloud" _ "github.com/cs3org/reva/v2/pkg/storage/fs/ocis" _ "github.com/cs3org/reva/v2/pkg/storage/fs/owncloudsql" - _ "github.com/cs3org/reva/v2/pkg/storage/fs/posix" _ "github.com/cs3org/reva/v2/pkg/storage/fs/s3" _ "github.com/cs3org/reva/v2/pkg/storage/fs/s3ng" // Add your own here diff --git a/pkg/storage/fs/loader/loader_linux.go b/pkg/storage/fs/loader/loader_linux.go new file mode 100644 index 0000000000..1f9a667845 --- /dev/null +++ b/pkg/storage/fs/loader/loader_linux.go @@ -0,0 +1,25 @@ +// Copyright 2018-2024 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package loader + +import ( + // Load core storage filesystem backends. + _ "github.com/cs3org/reva/v2/pkg/storage/fs/posix" + // Add your own here +) diff --git a/pkg/storage/fs/posix/blobstore/blobstore.go b/pkg/storage/fs/posix/blobstore/blobstore.go index 7afd3eeb55..eee4a3f8c4 100644 --- a/pkg/storage/fs/posix/blobstore/blobstore.go +++ b/pkg/storage/fs/posix/blobstore/blobstore.go @@ -20,14 +20,10 @@ package blobstore import ( "bufio" - "fmt" "io" "os" - "path/filepath" - "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" - "github.com/cs3org/reva/v2/pkg/utils" "github.com/pkg/errors" ) @@ -38,11 +34,6 @@ type Blobstore struct { // New returns a new Blobstore func New(root string) (*Blobstore, error) { - err := os.MkdirAll(root, 0700) - if err != nil { - return nil, err - } - return &Blobstore{ root: root, }, nil @@ -50,35 +41,21 @@ func New(root string) (*Blobstore, error) { // Upload stores some data in the blobstore under the given key func (bs *Blobstore) Upload(node *node.Node, source string) error { - dest, err := bs.path(node) - if err != nil { - return err - } - // ensure parent path exists - if err := os.MkdirAll(filepath.Dir(dest), 0700); err != nil { - return errors.Wrap(err, "Decomposedfs: oCIS blobstore: error creating parent folders for blob") - } - - if err := os.Rename(source, dest); err == nil { - return nil - } - - // Rename failed, file needs to be copied. 
file, err := os.Open(source) if err != nil { return errors.Wrap(err, "Decomposedfs: oCIS blobstore: Can not open source file to upload") } defer file.Close() - f, err := os.OpenFile(dest, os.O_CREATE|os.O_WRONLY, 0700) + f, err := os.OpenFile(node.InternalPath(), os.O_CREATE|os.O_WRONLY, 0700) if err != nil { - return errors.Wrapf(err, "could not open blob '%s' for writing", dest) + return errors.Wrapf(err, "could not open blob '%s' for writing", node.InternalPath()) } w := bufio.NewWriter(f) _, err = w.ReadFrom(file) if err != nil { - return errors.Wrapf(err, "could not write blob '%s'", dest) + return errors.Wrapf(err, "could not write blob '%s'", node.InternalPath()) } return w.Flush() @@ -86,37 +63,14 @@ func (bs *Blobstore) Upload(node *node.Node, source string) error { // Download retrieves a blob from the blobstore for reading func (bs *Blobstore) Download(node *node.Node) (io.ReadCloser, error) { - dest, err := bs.path(node) + file, err := os.Open(node.InternalPath()) if err != nil { - return nil, err - } - file, err := os.Open(dest) - if err != nil { - return nil, errors.Wrapf(err, "could not read blob '%s'", dest) + return nil, errors.Wrapf(err, "could not read blob '%s'", node.InternalPath()) } return file, nil } // Delete deletes a blob from the blobstore func (bs *Blobstore) Delete(node *node.Node) error { - dest, err := bs.path(node) - if err != nil { - return err - } - if err := utils.RemoveItem(dest); err != nil { - return errors.Wrapf(err, "could not delete blob '%s'", dest) - } return nil } - -func (bs *Blobstore) path(node *node.Node) (string, error) { - if node.BlobID == "" { - return "", fmt.Errorf("blobstore: BlobID is empty") - } - return filepath.Join( - bs.root, - filepath.Clean(filepath.Join( - "/", "spaces", lookup.Pathify(node.SpaceID, 1, 2), "blobs", lookup.Pathify(node.BlobID, 4, 2)), - ), - ), nil -} diff --git a/pkg/storage/fs/posix/lookup/lookup.go b/pkg/storage/fs/posix/lookup/lookup.go index 111177d975..7dc9780db5 100644 --- 
a/pkg/storage/fs/posix/lookup/lookup.go +++ b/pkg/storage/fs/posix/lookup/lookup.go @@ -24,16 +24,20 @@ import ( "os" "path/filepath" "strings" + "syscall" user "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/v2/pkg/appctx" "github.com/cs3org/reva/v2/pkg/errtypes" + "github.com/cs3org/reva/v2/pkg/storage/fs/posix/options" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata/prefixes" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" - "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/usermapper" "github.com/cs3org/reva/v2/pkg/storage/utils/templates" + "github.com/cs3org/reva/v2/pkg/storagespace" + "github.com/google/uuid" "github.com/pkg/errors" "github.com/rogpeppe/go-internal/lockedfile" "go.opentelemetry.io/otel" @@ -43,24 +47,127 @@ import ( var tracer trace.Tracer var _spaceTypePersonal = "personal" +var _spaceTypeProject = "project" func init() { tracer = otel.Tracer("github.com/cs3org/reva/pkg/storage/utils/decomposedfs/lookup") } +// IDCache is a cache for node ids +type IDCache interface { + Get(ctx context.Context, spaceID, nodeID string) (string, bool) + Set(ctx context.Context, spaceID, nodeID, val string) error +} + // Lookup implements transformations from filepath to node and back type Lookup struct { Options *options.Options + IDCache IDCache metadataBackend metadata.Backend + userMapper usermapper.Mapper } // New returns a new Lookup instance -func New(b metadata.Backend, o *options.Options) *Lookup { - return &Lookup{ +func New(b metadata.Backend, um usermapper.Mapper, o *options.Options) *Lookup { + lu := &Lookup{ Options: o, metadataBackend: b, + IDCache: NewStoreIDCache(&o.Options), + userMapper: um, } + + go func() { + _ = lu.WarmupIDCache(o.Root) + }() + + return lu +} + 
+// CacheID caches the id for the given space and node id +func (lu *Lookup) CacheID(ctx context.Context, spaceID, nodeID, val string) error { + return lu.IDCache.Set(ctx, spaceID, nodeID, val) +} + +// GetCachedID returns the cached id for the given space and node id +func (lu *Lookup) GetCachedID(ctx context.Context, spaceID, nodeID string) (string, bool) { + return lu.IDCache.Get(ctx, spaceID, nodeID) +} + +// WarmupIDCache warms up the id cache +func (lu *Lookup) WarmupIDCache(root string) error { + spaceID := []byte("") + + var gid int + + return filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + attribs, err := lu.metadataBackend.All(context.Background(), path) + if err == nil { + nodeSpaceID, ok := attribs[prefixes.SpaceIDAttr] + if ok { + spaceID = nodeSpaceID + + // set the uid and gid for the space + fi, err := os.Stat(path) + if err != nil { + return err + } + sys := fi.Sys().(*syscall.Stat_t) + gid = int(sys.Gid) + _, err = lu.userMapper.ScopeUserByIds(-1, gid) + if err != nil { + return err + } + } + + if len(spaceID) == 0 { + // try to find space + spaceCandidate := path + for strings.HasPrefix(spaceCandidate, lu.Options.Root) { + spaceID, err = lu.MetadataBackend().Get(context.Background(), spaceCandidate, prefixes.SpaceIDAttr) + if err == nil { + if lu.Options.UseSpaceGroups { + // set the uid and gid for the space + fi, err := os.Stat(spaceCandidate) + if err != nil { + return err + } + sys := fi.Sys().(*syscall.Stat_t) + gid := int(sys.Gid) + _, err = lu.userMapper.ScopeUserByIds(-1, gid) + if err != nil { + return err + } + } + break + } + spaceCandidate = filepath.Dir(spaceCandidate) + } + } + + id, ok := attribs[prefixes.IDAttr] + if ok && len(spaceID) > 0 { + _ = lu.IDCache.Set(context.Background(), string(spaceID), string(id), path) + } + } + return nil + }) +} + +// NodeIDFromParentAndName returns the id of the child node with the given name below the given parent node +func (lu *Lookup) NodeIDFromParentAndName(ctx context.Context, 
parent *node.Node, name string) (string, error) { + id, err := lu.metadataBackend.Get(ctx, filepath.Join(parent.InternalPath(), name), prefixes.IDAttr) + if err != nil { + if metadata.IsNotExist(err) { + return "", errtypes.NotFound(name) + } + return "", err + } + return string(id), nil } // MetadataBackend returns the metadata backend @@ -118,17 +225,6 @@ func (lu *Lookup) TypeFromPath(ctx context.Context, path string) provider.Resour return t } -func (lu *Lookup) NodeIDFromParentAndName(ctx context.Context, parent *node.Node, name string) (string, error) { - id, err := lu.metadataBackend.Get(ctx, filepath.Join(parent.InternalPath(), name), prefixes.IDAttr) - if err != nil { - if metadata.IsNotExist(err) { - return "", errtypes.NotFound(name) - } - return "", err - } - return string(id), nil -} - // NodeFromResource takes in a request path or request id and converts it to a Node func (lu *Lookup) NodeFromResource(ctx context.Context, ref *provider.Reference) (*node.Node, error) { ctx, span := tracer.Start(ctx, "NodeFromResource") @@ -272,11 +368,9 @@ func (lu *Lookup) InternalRoot() string { // InternalPath returns the internal path for a given ID func (lu *Lookup) InternalPath(spaceID, nodeID string) string { - return filepath.Join(lu.Options.Root, "spaces", Pathify(spaceID, 1, 2), "nodes", Pathify(nodeID, 4, 2)) -} + path, _ := lu.IDCache.Get(context.Background(), spaceID, nodeID) -func (lu *Lookup) SpacePath(spaceID string) string { - return filepath.Join(lu.Options.Root, spaceID) + return path } // // ReferenceFromAttr returns a CS3 reference from xattr of a node. 
@@ -358,30 +452,25 @@ func (lu *Lookup) CopyMetadataWithSourceLock(ctx context.Context, sourcePath, ta // GenerateSpaceID generates a space id for the given space type and owner func (lu *Lookup) GenerateSpaceID(spaceType string, owner *user.User) (string, error) { switch spaceType { + case _spaceTypeProject: + return uuid.New().String(), nil case _spaceTypePersonal: - return templates.WithUser(owner, lu.Options.UserLayout), nil - default: - return "", fmt.Errorf("unsupported space type: %s", spaceType) - } -} + path := templates.WithUser(owner, lu.Options.UserLayout) -// DetectBackendOnDisk returns the name of the metadata backend being used on disk -func DetectBackendOnDisk(root string) string { - matches, _ := filepath.Glob(filepath.Join(root, "spaces", "*", "*")) - if len(matches) > 0 { - base := matches[len(matches)-1] - spaceid := strings.ReplaceAll( - strings.TrimPrefix(base, filepath.Join(root, "spaces")), - "/", "") - spaceRoot := Pathify(spaceid, 4, 2) - _, err := os.Stat(filepath.Join(base, "nodes", spaceRoot+".mpk")) - if err == nil { - return "mpk" + spaceID, err := lu.metadataBackend.Get(context.Background(), filepath.Join(lu.Options.Root, path), prefixes.IDAttr) + if err != nil { + if metadata.IsNotExist(err) || metadata.IsAttrUnset(err) { + return uuid.New().String(), nil + } else { + return "", err + } } - _, err = os.Stat(filepath.Join(base, "nodes", spaceRoot+".ini")) - if err == nil { - return "ini" + resID, err := storagespace.ParseID(string(spaceID)) + if err != nil { + return "", err } + return resID.SpaceId, nil + default: + return "", fmt.Errorf("unsupported space type: %s", spaceType) } - return "xattrs" } diff --git a/pkg/storage/fs/posix/lookup/store_idcache.go b/pkg/storage/fs/posix/lookup/store_idcache.go new file mode 100644 index 0000000000..a7690052c0 --- /dev/null +++ b/pkg/storage/fs/posix/lookup/store_idcache.go @@ -0,0 +1,69 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); 
+// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package lookup + +import ( + "context" + + microstore "go-micro.dev/v4/store" + + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options" + "github.com/cs3org/reva/v2/pkg/store" +) + +type StoreIDCache struct { + cache microstore.Store +} + +// NewStoreIDCache returns a new StoreIDCache backed by the configured micro store +func NewStoreIDCache(o *options.Options) *StoreIDCache { + return &StoreIDCache{ + cache: store.Create( + store.Store(o.IDCache.Store), + store.TTL(o.IDCache.TTL), + store.Size(o.IDCache.Size), + microstore.Nodes(o.IDCache.Nodes...), + microstore.Database(o.IDCache.Database), + microstore.Table(o.IDCache.Table), + store.DisablePersistence(o.IDCache.DisablePersistence), + store.Authentication(o.IDCache.AuthUsername, o.IDCache.AuthPassword), + ), + } +} + +// Set adds a new entry to the cache +func (c *StoreIDCache) Set(_ context.Context, spaceID, nodeID, val string) error { + return c.cache.Write(&microstore.Record{ + Key: cacheKey(spaceID, nodeID), + Value: []byte(val), + }) +} + +// Get returns the value for a given key +func (c *StoreIDCache) Get(_ context.Context, spaceID, nodeID string) (string, bool) { + records, err := c.cache.Read(cacheKey(spaceID, nodeID)) + if err != nil { + return "", false + } + return string(records[0].Value), true +} + +func cacheKey(spaceid, nodeID 
string) string { + return spaceid + "!" + nodeID +} diff --git a/pkg/storage/fs/posix/options/options.go b/pkg/storage/fs/posix/options/options.go new file mode 100644 index 0000000000..f55f1029e1 --- /dev/null +++ b/pkg/storage/fs/posix/options/options.go @@ -0,0 +1,52 @@ +// Copyright 2018-2024 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +package options + +import ( + decomposedoptions "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options" + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" +) + +type Options struct { + decomposedoptions.Options + + UseSpaceGroups bool `mapstructure:"use_space_groups"` + + WatchType string `mapstructure:"watch_type"` + WatchPath string `mapstructure:"watch_path"` + WatchFolderKafkaBrokers string `mapstructure:"watch_folder_kafka_brokers"` +} + +// New returns a new Options instance for the given configuration +func New(m map[string]interface{}) (*Options, error) { + o := &Options{} + if err := mapstructure.Decode(m, o); err != nil { + err = errors.Wrap(err, "error decoding conf") + return nil, err + } + + do, err := decomposedoptions.New(m) + if err != nil { + return nil, err + } + o.Options = *do + + return o, nil +} diff --git a/pkg/storage/fs/posix/posix.go b/pkg/storage/fs/posix/posix.go index f354f0a56a..eb18136833 100644 --- a/pkg/storage/fs/posix/posix.go +++ b/pkg/storage/fs/posix/posix.go @@ -1,3 +1,6 @@ +//go:build linux +// +build linux + // Copyright 2018-2021 CERN // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,9 +22,12 @@ package posix import ( + "context" "fmt" - "path" + "os" + "syscall" + tusd "github.com/tus/tusd/pkg/handler" microstore "go-micro.dev/v4/store" "github.com/cs3org/reva/v2/pkg/events" @@ -29,21 +35,31 @@ import ( "github.com/cs3org/reva/v2/pkg/storage" "github.com/cs3org/reva/v2/pkg/storage/fs/posix/blobstore" "github.com/cs3org/reva/v2/pkg/storage/fs/posix/lookup" + "github.com/cs3org/reva/v2/pkg/storage/fs/posix/options" "github.com/cs3org/reva/v2/pkg/storage/fs/posix/tree" "github.com/cs3org/reva/v2/pkg/storage/fs/registry" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/aspects" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" 
- "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/permissions" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/upload" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/usermapper" + "github.com/cs3org/reva/v2/pkg/storage/utils/middleware" "github.com/cs3org/reva/v2/pkg/store" + "github.com/pkg/errors" ) func init() { registry.Register("posix", New) } +type posixFS struct { + storage.FS + + um usermapper.Mapper +} + // New returns an implementation to of the storage.FS interface that talk to // a local filesystem. func New(m map[string]interface{}, stream events.Stream) (storage.FS, error) { @@ -52,22 +68,24 @@ func New(m map[string]interface{}, stream events.Stream) (storage.FS, error) { return nil, err } - bs, err := blobstore.New(path.Join(o.Root)) + bs, err := blobstore.New(o.Root) if err != nil { return nil, err } + um := usermapper.NewUnixMapper() + var lu *lookup.Lookup switch o.MetadataBackend { case "xattrs": - lu = lookup.New(metadata.XattrsBackend{}, o) + lu = lookup.New(metadata.XattrsBackend{}, um, o) case "messagepack": - lu = lookup.New(metadata.NewMessagePackBackend(o.Root, o.FileMetadataCache), o) + lu = lookup.New(metadata.NewMessagePackBackend(o.Root, o.FileMetadataCache), um, o) default: return nil, fmt.Errorf("unknown metadata backend %s, only 'messagepack' or 'xattrs' (default) supported", o.MetadataBackend) } - tp := tree.New(lu, bs, o, store.Create( + tp, err := tree.New(lu, bs, um, o, store.Create( store.Store(o.IDCache.Store), store.TTL(o.IDCache.TTL), store.Size(o.IDCache.Size), @@ -77,6 +95,9 @@ func New(m map[string]interface{}, stream events.Stream) (storage.FS, error) { store.DisablePersistence(o.IDCache.DisablePersistence), store.Authentication(o.IDCache.AuthUsername, o.IDCache.AuthPassword), )) + if err != nil { + return nil, err + } permissionsSelector, err := pool.PermissionsSelector(o.PermissionsSVC, pool.WithTLSMode(o.PermTLSMode)) 
if err != nil { @@ -86,15 +107,98 @@ func New(m map[string]interface{}, stream events.Stream) (storage.FS, error) { p := permissions.NewPermissions(node.NewPermissions(lu), permissionsSelector) aspects := aspects.Aspects{ - Lookup: lu, - Tree: tp, - Permissions: p, - EventStream: stream, + Lookup: lu, + Tree: tp, + Permissions: p, + EventStream: stream, + UserMapper: um, + DisableVersioning: true, } - fs, err := decomposedfs.New(o, aspects) + + dfs, err := decomposedfs.New(&o.Options, aspects) if err != nil { return nil, err } + hooks := []middleware.Hook{} + if o.UseSpaceGroups { + resolveSpaceHook := func(methodName string, ctx context.Context, spaceID string) (context.Context, middleware.UnHook, error) { + if spaceID == "" { + return ctx, nil, nil + } + + spaceRoot := lu.InternalPath(spaceID, spaceID) + fi, err := os.Stat(spaceRoot) + if err != nil { + return ctx, nil, err + } + + ctx = context.WithValue(ctx, decomposedfs.CtxKeySpaceGID, fi.Sys().(*syscall.Stat_t).Gid) + + return ctx, nil, err + } + scopeSpaceGroupHook := func(methodName string, ctx context.Context, spaceID string) (context.Context, middleware.UnHook, error) { + spaceGID, ok := ctx.Value(decomposedfs.CtxKeySpaceGID).(uint32) + if !ok { + return ctx, nil, nil + } + + unscope, err := um.ScopeUserByIds(-1, int(spaceGID)) + if err != nil { + return ctx, nil, errors.Wrap(err, "failed to scope user") + } + + return ctx, unscope, nil + } + hooks = append(hooks, resolveSpaceHook, scopeSpaceGroupHook) + } + + mw := middleware.NewFS(dfs, hooks...) + fs := &posixFS{ + FS: mw, + um: um, + } + return fs, nil } + +// ListUploadSessions returns the upload sessions matching the given filter +func (fs *posixFS) ListUploadSessions(ctx context.Context, filter storage.UploadSessionFilter) ([]storage.UploadSession, error) { + return fs.FS.(storage.UploadSessionLister).ListUploadSessions(ctx, filter) +} + +// UseIn tells the tus upload middleware which extensions it supports. 
+func (fs *posixFS) UseIn(composer *tusd.StoreComposer) { + fs.FS.(storage.ComposableFS).UseIn(composer) +} + +// NewUpload returns a new tus Upload instance +func (fs *posixFS) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tusd.Upload, err error) { + return fs.FS.(tusd.DataStore).NewUpload(ctx, info) +} + +// NewUpload returns a new tus Upload instance +func (fs *posixFS) GetUpload(ctx context.Context, id string) (upload tusd.Upload, err error) { + return fs.FS.(tusd.DataStore).GetUpload(ctx, id) +} + +// AsTerminatableUpload returns a TerminatableUpload +// To implement the termination extension as specified in https://tus.io/protocols/resumable-upload.html#termination +// the storage needs to implement AsTerminatableUpload +func (fs *posixFS) AsTerminatableUpload(up tusd.Upload) tusd.TerminatableUpload { + return up.(*upload.OcisSession) +} + +// AsLengthDeclarableUpload returns a LengthDeclarableUpload +// To implement the creation-defer-length extension as specified in https://tus.io/protocols/resumable-upload.html#creation +// the storage needs to implement AsLengthDeclarableUpload +func (fs *posixFS) AsLengthDeclarableUpload(up tusd.Upload) tusd.LengthDeclarableUpload { + return up.(*upload.OcisSession) +} + +// AsConcatableUpload returns a ConcatableUpload +// To implement the concatenation extension as specified in https://tus.io/protocols/resumable-upload.html#concatenation +// the storage needs to implement AsConcatableUpload +func (fs *posixFS) AsConcatableUpload(up tusd.Upload) tusd.ConcatableUpload { + return up.(*upload.OcisSession) +} diff --git a/pkg/storage/fs/posix/testhelpers/helpers.go b/pkg/storage/fs/posix/testhelpers/helpers.go new file mode 100644 index 0000000000..b0f5bd8b28 --- /dev/null +++ b/pkg/storage/fs/posix/testhelpers/helpers.go @@ -0,0 +1,380 @@ +// Copyright 2018-2024 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package helpers + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/google/uuid" + "github.com/stretchr/testify/mock" + "google.golang.org/grpc" + + userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + cs3permissions "github.com/cs3org/go-cs3apis/cs3/permissions/v1beta1" + v1beta11 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + providerv1beta1 "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + ruser "github.com/cs3org/reva/v2/pkg/ctx" + "github.com/cs3org/reva/v2/pkg/rgrpc/todo/pool" + "github.com/cs3org/reva/v2/pkg/storage/fs/posix/lookup" + "github.com/cs3org/reva/v2/pkg/storage/fs/posix/options" + "github.com/cs3org/reva/v2/pkg/storage/fs/posix/tree" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/aspects" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata/prefixes" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/permissions" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/permissions/mocks" + treemocks "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/tree/mocks" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/usermapper" + 
"github.com/cs3org/reva/v2/pkg/storagespace" + "github.com/cs3org/reva/v2/pkg/store" + "github.com/cs3org/reva/v2/tests/helpers" +) + +// TestEnv represents a test environment for unit tests +type TestEnv struct { + Root string + Fs *decomposedfs.Decomposedfs + Tree *tree.Tree + Permissions *mocks.PermissionsChecker + Blobstore *treemocks.Blobstore + Owner *userpb.User + DeleteAllSpacesUser *userpb.User + DeleteHomeSpacesUser *userpb.User + Users []*userpb.User + Lookup *lookup.Lookup + Ctx context.Context + SpaceRootRes *providerv1beta1.ResourceId + PermissionsClient *mocks.CS3PermissionsClient + Options *options.Options +} + +// Constant UUIDs for the space users +const ( + OwnerID = "25b69780-5f39-43be-a7ac-a9b9e9fe4230" + DeleteAllSpacesUserID = "39885dbc-68c0-47c0-a873-9d5e5646dceb" + DeleteHomeSpacesUserID = "ca8c6bf1-36a7-4d10-87a5-a2806566f983" + User0ID = "824385ae-8fc6-4896-8eb2-d1d171290bd0" + User1ID = "693b0d96-80a2-4016-b53d-425ce4f66114" +) + +// NewTestEnv prepares a test environment on disk +// The storage contains some directories and a file: +// +// /dir1/ +// /dir1/file1 +// /dir1/subdir1/ +// +// The default config can be overridden by providing the strings to override +// via map as a parameter +func NewTestEnv(config map[string]interface{}) (*TestEnv, error) { + um := &usermapper.NullMapper{} + + tmpRoot, err := helpers.TempDir("reva-unit-tests-*-root") + if err != nil { + return nil, err + } + defaultConfig := map[string]interface{}{ + "root": tmpRoot, + "treetime_accounting": true, + "treesize_accounting": true, + "personalspacepath_template": "users/{{.User.Username}}", + "generalspacepath_template": "projects/{{.SpaceId}}", + } + // make it possible to override single config values + for k, v := range config { + defaultConfig[k] = v + } + + o, err := options.New(defaultConfig) + if err != nil { + return nil, err + } + + owner := &userpb.User{ + Id: &userpb.UserId{ + Idp: "idp", + OpaqueId: OwnerID, + Type: 
userpb.UserType_USER_TYPE_PRIMARY, + }, + Username: "username", + } + deleteHomeSpacesUser := &userpb.User{ + Id: &userpb.UserId{ + Idp: "idp", + OpaqueId: DeleteHomeSpacesUserID, + Type: userpb.UserType_USER_TYPE_PRIMARY, + }, + Username: "username", + } + deleteAllSpacesUser := &userpb.User{ + Id: &userpb.UserId{ + Idp: "idp", + OpaqueId: DeleteAllSpacesUserID, + Type: userpb.UserType_USER_TYPE_PRIMARY, + }, + Username: "username", + } + users := []*userpb.User{ + { + Id: &userpb.UserId{ + Idp: "idp", + OpaqueId: User0ID, + Type: userpb.UserType_USER_TYPE_PRIMARY, + }, + }, + { + Id: &userpb.UserId{ + Idp: "idp", + OpaqueId: User1ID, + Type: userpb.UserType_USER_TYPE_PRIMARY, + }, + }, + } + var lu *lookup.Lookup + switch o.MetadataBackend { + case "xattrs": + lu = lookup.New(metadata.XattrsBackend{}, um, o) + case "messagepack": + lu = lookup.New(metadata.NewMessagePackBackend(o.Root, o.FileMetadataCache), um, o) + default: + return nil, fmt.Errorf("unknown metadata backend %s", o.MetadataBackend) + } + + pmock := &mocks.PermissionsChecker{} + + cs3permissionsclient := &mocks.CS3PermissionsClient{} + pool.RemoveSelector("PermissionsSelector" + "any") + permissionsSelector := pool.GetSelector[cs3permissions.PermissionsAPIClient]( + "PermissionsSelector", + "any", + func(cc *grpc.ClientConn) cs3permissions.PermissionsAPIClient { + return cs3permissionsclient + }, + ) + + bs := &treemocks.Blobstore{} + tree, err := tree.New(lu, bs, um, o, store.Create()) + if err != nil { + return nil, err + } + aspects := aspects.Aspects{ + Lookup: lu, + Tree: tree, + Permissions: permissions.NewPermissions(pmock, permissionsSelector), + } + fs, err := decomposedfs.New(&o.Options, aspects) + if err != nil { + return nil, err + } + ctx := ruser.ContextSetUser(context.Background(), owner) + + tmpFs, _ := fs.(*decomposedfs.Decomposedfs) + + env := &TestEnv{ + Root: tmpRoot, + Fs: tmpFs, + Tree: tree, + Lookup: lu, + Permissions: pmock, + Blobstore: bs, + Owner: owner, + 
DeleteAllSpacesUser: deleteAllSpacesUser, + DeleteHomeSpacesUser: deleteHomeSpacesUser, + Users: users, + Ctx: ctx, + PermissionsClient: cs3permissionsclient, + Options: o, + } + + env.SpaceRootRes, err = env.CreateTestStorageSpace("personal", nil) + return env, err +} + +// Cleanup removes all files from disk +func (t *TestEnv) Cleanup() { + os.RemoveAll(t.Root) +} + +// CreateTestDir create a directory and returns a corresponding Node +func (t *TestEnv) CreateTestDir(name string, parentRef *providerv1beta1.Reference) (*node.Node, error) { + ref := parentRef + ref.Path = name + + err := t.Fs.CreateDir(t.Ctx, ref) + if err != nil { + return nil, err + } + + ref.Path = name + n, err := t.Lookup.NodeFromResource(t.Ctx, ref) + if err != nil { + return nil, err + } + + return n, nil +} + +// CreateTestFile creates a new file and its metadata and returns a corresponding Node +func (t *TestEnv) CreateTestFile(name, blobID, parentID, spaceID string, blobSize int64) (*node.Node, error) { + // Create n in dir1 + n := node.New( + spaceID, + uuid.New().String(), + parentID, + name, + blobSize, + blobID, + providerv1beta1.ResourceType_RESOURCE_TYPE_FILE, + nil, + t.Lookup, + ) + nodePath := filepath.Join(n.ParentPath(), n.Name) + if err := os.MkdirAll(filepath.Dir(nodePath), 0700); err != nil { + return nil, err + } + _, err := os.OpenFile(nodePath, os.O_CREATE, 0700) + if err != nil { + return nil, err + } + err = t.Lookup.CacheID(t.Ctx, spaceID, n.ID, nodePath) + if err != nil { + return nil, err + } + err = n.SetXattrs(n.NodeMetadata(t.Ctx), true) + if err != nil { + return nil, err + } + if err := n.FindStorageSpaceRoot(t.Ctx); err != nil { + return nil, err + } + + return n, t.Tree.Propagate(t.Ctx, n, blobSize) + +} + +// CreateTestStorageSpace will create a storage space with some directories and files +// It returns the ResourceId of the space +// +// /dir1/ +// /dir1/file1 +// /dir1/subdir1 +func (t *TestEnv) CreateTestStorageSpace(typ string, quota 
*providerv1beta1.Quota) (*providerv1beta1.ResourceId, error) { + t.PermissionsClient.On("CheckPermission", mock.Anything, mock.Anything, mock.Anything).Times(1).Return(&cs3permissions.CheckPermissionResponse{ + Status: &v1beta11.Status{Code: v1beta11.Code_CODE_OK}, + }, nil) + // Permissions required for setup below + t.Permissions.On("AssemblePermissions", mock.Anything, mock.Anything, mock.Anything).Return(providerv1beta1.ResourcePermissions{ + Stat: true, + AddGrant: true, + }, nil).Times(1) // + + var owner *userpb.User + if typ == "personal" { + owner = t.Owner + } + space, err := t.Fs.CreateStorageSpace(t.Ctx, &providerv1beta1.CreateStorageSpaceRequest{ + Owner: owner, + Type: typ, + Quota: quota, + }) + if err != nil { + return nil, err + } + + ref := buildRef(space.StorageSpace.Id.OpaqueId, "") + + // the space name attribute is the stop condition in the lookup + sid, err := storagespace.ParseID(space.StorageSpace.Id.OpaqueId) + if err != nil { + return nil, err + } + h, err := node.ReadNode(t.Ctx, t.Lookup, sid.SpaceId, sid.OpaqueId, false, nil, false) + if err != nil { + return nil, err + } + if err = h.SetXattr(t.Ctx, prefixes.SpaceNameAttr, []byte("username")); err != nil { + return nil, err + } + + // Create dir1 + t.Permissions.On("AssemblePermissions", mock.Anything, mock.Anything, mock.Anything).Return(providerv1beta1.ResourcePermissions{ + Stat: true, + CreateContainer: true, + }, nil).Times(1) // Permissions required for setup below + dir1, err := t.CreateTestDir("./dir1", ref) + if err != nil { + return nil, err + } + + // Create file1 in dir1 + _, err = t.CreateTestFile("file1", "file1-blobid", dir1.ID, dir1.SpaceID, 1234) + if err != nil { + return nil, err + } + + // Create subdir1 in dir1 + t.Permissions.On("AssemblePermissions", mock.Anything, mock.Anything, mock.Anything).Return(providerv1beta1.ResourcePermissions{ + Stat: true, + CreateContainer: true, + }, nil).Times(1) // Permissions required for setup below + ref.Path = "./dir1/subdir1" 
+	err = t.Fs.CreateDir(t.Ctx, ref)
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = dir1.Child(t.Ctx, "subdir1")
+	if err != nil {
+		return nil, err
+	}
+
+	// Create emptydir
+	t.Permissions.On("AssemblePermissions", mock.Anything, mock.Anything, mock.Anything).Return(providerv1beta1.ResourcePermissions{
+		Stat:            true,
+		CreateContainer: true,
+	}, nil).Times(1) // Permissions required for setup below
+	ref.Path = "/emptydir"
+	err = t.Fs.CreateDir(t.Ctx, ref)
+	if err != nil {
+		return nil, err
+	}
+
+	return ref.ResourceId, nil
+}
+
+// shortcut to get a ref
+func buildRef(id, path string) *providerv1beta1.Reference {
+	res, err := storagespace.ParseID(id)
+	if err != nil {
+		return nil
+	}
+	return &providerv1beta1.Reference{
+		ResourceId: &providerv1beta1.ResourceId{
+			StorageId: res.StorageId,
+			SpaceId:   res.SpaceId,
+			OpaqueId:  res.OpaqueId,
+		},
+		Path: path,
+	}
+}
diff --git a/pkg/storage/fs/posix/tree/assimilation.go b/pkg/storage/fs/posix/tree/assimilation.go
new file mode 100644
index 0000000000..9070432ded
--- /dev/null
+++ b/pkg/storage/fs/posix/tree/assimilation.go
@@ -0,0 +1,277 @@
+// Copyright 2018-2021 CERN
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// In applying this license, CERN does not waive the privileges and immunities
+// granted to it by virtue of its status as an Intergovernmental Organization
+// or submit itself to any jurisdiction.
+ +package tree + +import ( + "context" + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" + "sync" + "syscall" + "time" + + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/cs3org/reva/v2/pkg/storage/fs/posix/lookup" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata/prefixes" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/rs/zerolog/log" +) + +type ScanDebouncer struct { + after time.Duration + f func(item scanItem) + pending map[string]*time.Timer + inProgress sync.Map + + mutex sync.Mutex +} + +// NewScanDebouncer returns a new SpaceDebouncer instance +func NewScanDebouncer(d time.Duration, f func(item scanItem)) *ScanDebouncer { + return &ScanDebouncer{ + after: d, + f: f, + pending: map[string]*time.Timer{}, + inProgress: sync.Map{}, + } +} + +// Debounce restars the debounce timer for the given space +func (d *ScanDebouncer) Debounce(item scanItem) { + d.mutex.Lock() + defer d.mutex.Unlock() + + path := item.Path + force := item.ForceRescan + if t := d.pending[item.Path]; t != nil { + force = force || item.ForceRescan + t.Stop() + } + + d.pending[item.Path] = time.AfterFunc(d.after, func() { + if _, ok := d.inProgress.Load(path); ok { + // Reschedule this run for when the previous run has finished + d.mutex.Lock() + d.pending[path].Reset(d.after) + d.mutex.Unlock() + return + } + + d.inProgress.Store(path, true) + defer d.inProgress.Delete(path) + d.f(scanItem{ + Path: path, + ForceRescan: force, + }) + }) +} + +func (t *Tree) workScanQueue() { + for i := 0; i < t.options.MaxConcurrency; i++ { + go func() { + for { + item := <-t.scanQueue + + err := t.assimilate(item) + if err != nil { + log.Error().Err(err).Str("path", item.Path).Msg("failed to assimilate item") + continue + } + } + }() + } +} + +// Scan scans the given path and updates the id 
cache
+func (t *Tree) Scan(path string, forceRescan bool) error {
+	t.scanDebouncer.Debounce(scanItem{
+		Path:        path,
+		ForceRescan: forceRescan,
+	})
+	return nil
+}
+
+func (t *Tree) assimilate(item scanItem) error {
+	var err error
+	// find the space id, scope by the according user
+	spaceID := []byte("")
+	spaceCandidate := item.Path
+	for strings.HasPrefix(spaceCandidate, t.options.Root) {
+		spaceID, err = t.lookup.MetadataBackend().Get(context.Background(), spaceCandidate, prefixes.SpaceIDAttr)
+		if err == nil {
+			if t.options.UseSpaceGroups {
+				// set the uid and gid for the space
+				fi, err := os.Stat(spaceCandidate)
+				if err != nil {
+					return err
+				}
+				sys := fi.Sys().(*syscall.Stat_t)
+				gid := int(sys.Gid)
+				_, err = t.userMapper.ScopeUserByIds(-1, gid)
+				if err != nil {
+					return err
+				}
+			}
+			break
+		}
+		spaceCandidate = filepath.Dir(spaceCandidate)
+	}
+	if len(spaceID) == 0 {
+		return fmt.Errorf("did not find space id for path")
+	}
+
+	var id []byte
+	if !item.ForceRescan {
+		// already assimilated?
+ id, err := t.lookup.MetadataBackend().Get(context.Background(), item.Path, prefixes.IDAttr) + if err == nil { + _ = t.lookup.(*lookup.Lookup).CacheID(context.Background(), string(spaceID), string(id), item.Path) + return nil + } + } + + // lock the file for assimilation + unlock, err := t.lookup.MetadataBackend().Lock(item.Path) + if err != nil { + return errors.Wrap(err, "failed to lock item for assimilation") + } + defer func() { + _ = unlock() + }() + + // check for the id attribute again after grabbing the lock, maybe the file was assimilated/created by us in the meantime + id, err = t.lookup.MetadataBackend().Get(context.Background(), item.Path, prefixes.IDAttr) + if err == nil { + _ = t.lookup.(*lookup.Lookup).CacheID(context.Background(), string(spaceID), string(id), item.Path) + if item.ForceRescan { + _, err = t.updateFile(item.Path, string(id), string(spaceID)) + if err != nil { + return err + } + } + } else { + // assimilate new file + newId := uuid.New().String() + _, err = t.updateFile(item.Path, newId, string(spaceID)) + if err != nil { + return err + } + } + return nil +} + +func (t *Tree) updateFile(path, id, spaceID string) (fs.FileInfo, error) { + retries := 1 + parentID := "" +assimilate: + if id != spaceID { + // read parent + parentAttribs, err := t.lookup.MetadataBackend().All(context.Background(), filepath.Dir(path)) + if err != nil { + return nil, fmt.Errorf("failed to read parent item attributes") + } + + if len(parentAttribs) == 0 || len(parentAttribs[prefixes.IDAttr]) == 0 { + if retries == 0 { + return nil, fmt.Errorf("got empty parent attribs even after assimilating") + } + + // assimilate parent first + err = t.assimilate(scanItem{Path: filepath.Dir(path), ForceRescan: false}) + if err != nil { + return nil, err + } + + // retry + retries-- + goto assimilate + } + parentID = string(parentAttribs[prefixes.IDAttr]) + } + + // assimilate file + fi, err := os.Stat(path) + if err != nil { + return nil, errors.Wrap(err, "failed to stat 
item")
+	}
+
+	previousAttribs, err := t.lookup.MetadataBackend().All(context.Background(), path)
+	if err != nil && !metadata.IsAttrUnset(err) {
+		return nil, errors.Wrap(err, "failed to get item attribs")
+	}
+
+	attributes := node.Attributes{
+		prefixes.IDAttr:    []byte(id),
+		prefixes.NameAttr:  []byte(filepath.Base(path)),
+		prefixes.MTimeAttr: []byte(fi.ModTime().Format(time.RFC3339)),
+	}
+	if len(parentID) > 0 {
+		attributes[prefixes.ParentidAttr] = []byte(parentID)
+	}
+
+	sha1h, md5h, adler32h, err := node.CalculateChecksums(context.Background(), path)
+	if err == nil {
+		attributes[prefixes.ChecksumPrefix+"sha1"] = sha1h.Sum(nil)
+		attributes[prefixes.ChecksumPrefix+"md5"] = md5h.Sum(nil)
+		attributes[prefixes.ChecksumPrefix+"adler32"] = adler32h.Sum(nil)
+	}
+
+	if fi.IsDir() {
+		attributes.SetInt64(prefixes.TypeAttr, int64(provider.ResourceType_RESOURCE_TYPE_CONTAINER))
+		attributes.SetInt64(prefixes.TreesizeAttr, 0)
+		if previousAttribs != nil && previousAttribs[prefixes.TreesizeAttr] != nil {
+			attributes[prefixes.TreesizeAttr] = previousAttribs[prefixes.TreesizeAttr]
+		}
+		attributes[prefixes.PropagationAttr] = []byte("1")
+	} else {
+		attributes.SetInt64(prefixes.TypeAttr, int64(provider.ResourceType_RESOURCE_TYPE_FILE))
+		attributes.SetString(prefixes.BlobIDAttr, id)
+		attributes.SetInt64(prefixes.BlobsizeAttr, fi.Size())
+
+		// propagate the change
+		sizeDiff := fi.Size()
+		if previousAttribs != nil && previousAttribs[prefixes.BlobsizeAttr] != nil {
+			oldSize, err := node.Attributes(previousAttribs).Int64(prefixes.BlobsizeAttr)
+			if err == nil {
+				sizeDiff -= oldSize
+			}
+		}
+
+		n := node.New(spaceID, id, parentID, filepath.Base(path), fi.Size(), "", provider.ResourceType_RESOURCE_TYPE_FILE, nil, t.lookup)
+		n.SpaceRoot = &node.Node{SpaceID: spaceID, ID: spaceID}
+		err = t.Propagate(context.Background(), n, sizeDiff)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to propagate")
+		}
+	}
+	err = t.lookup.MetadataBackend().SetMultiple(context.Background(), path,
attributes, false) + if err != nil { + return nil, errors.Wrap(err, "failed to set attributes") + } + + _ = t.lookup.(*lookup.Lookup).CacheID(context.Background(), spaceID, id, path) + + return fi, nil +} diff --git a/pkg/storage/fs/posix/tree/gpfsfilauditloggingwatcher.go b/pkg/storage/fs/posix/tree/gpfsfilauditloggingwatcher.go new file mode 100644 index 0000000000..453da70960 --- /dev/null +++ b/pkg/storage/fs/posix/tree/gpfsfilauditloggingwatcher.go @@ -0,0 +1,85 @@ +package tree + +import ( + "bufio" + "encoding/json" + "io" + "os" + "strconv" + "time" + + "github.com/cs3org/reva/v2/pkg/storage/fs/posix/lookup" +) + +type GpfsFileAuditLoggingWatcher struct { + tree *Tree +} + +type lwe struct { + Event string + Path string + BytesWritten string +} + +func NewGpfsFileAuditLoggingWatcher(tree *Tree, auditLogFile string) (*GpfsFileAuditLoggingWatcher, error) { + w := &GpfsFileAuditLoggingWatcher{ + tree: tree, + } + + _, err := os.Stat(auditLogFile) + if err != nil { + return nil, err + } + + return w, nil +} + +func (w *GpfsFileAuditLoggingWatcher) Watch(path string) { +start: + file, err := os.Open(path) + if err != nil { + // try again later + time.Sleep(5 * time.Second) + goto start + } + defer file.Close() + + // Seek to the end of the file + _, err = file.Seek(0, io.SeekEnd) + if err != nil { + time.Sleep(5 * time.Second) + goto start + } + + reader := bufio.NewReader(file) + ev := &lwe{} + for { + line, err := reader.ReadString('\n') + switch err { + case nil: + err := json.Unmarshal([]byte(line), ev) + if err != nil { + continue + } + switch ev.Event { + case "CREATE": + go func() { _ = w.tree.Scan(ev.Path, false) }() + case "CLOSE": + bytesWritten, err := strconv.Atoi(ev.BytesWritten) + if err == nil && bytesWritten > 0 { + go func() { _ = w.tree.Scan(ev.Path, true) }() + } + case "RENAME": + go func() { + _ = w.tree.Scan(ev.Path, true) + _ = w.tree.lookup.(*lookup.Lookup).WarmupIDCache(ev.Path) + }() + } + case io.EOF: + time.Sleep(1 * time.Second) + 
default: + time.Sleep(5 * time.Second) + goto start + } + } +} diff --git a/pkg/storage/fs/posix/tree/gpfswatchfolderwatcher.go b/pkg/storage/fs/posix/tree/gpfswatchfolderwatcher.go new file mode 100644 index 0000000000..c32a9f50b8 --- /dev/null +++ b/pkg/storage/fs/posix/tree/gpfswatchfolderwatcher.go @@ -0,0 +1,67 @@ +package tree + +import ( + "context" + "encoding/json" + "log" + "strconv" + "strings" + + "github.com/cs3org/reva/v2/pkg/storage/fs/posix/lookup" + kafka "github.com/segmentio/kafka-go" +) + +type GpfsWatchFolderWatcher struct { + tree *Tree + brokers []string +} + +func NewGpfsWatchFolderWatcher(tree *Tree, kafkaBrokers []string) (*GpfsWatchFolderWatcher, error) { + return &GpfsWatchFolderWatcher{ + tree: tree, + brokers: kafkaBrokers, + }, nil +} + +func (w *GpfsWatchFolderWatcher) Watch(topic string) { + r := kafka.NewReader(kafka.ReaderConfig{ + Brokers: w.brokers, + GroupID: "ocis-posixfs", + Topic: topic, + }) + + lwev := &lwe{} + for { + m, err := r.ReadMessage(context.Background()) + if err != nil { + break + } + + err = json.Unmarshal(m.Value, lwev) + if err != nil { + continue + } + + if strings.HasSuffix(lwev.Path, ".flock") || strings.HasSuffix(lwev.Path, ".mlock") { + continue + } + + switch { + case strings.Contains(lwev.Event, "IN_CREATE"): + go func() { _ = w.tree.Scan(lwev.Path, false) }() + case strings.Contains(lwev.Event, "IN_CLOSE_WRITE"): + bytesWritten, err := strconv.Atoi(lwev.BytesWritten) + if err == nil && bytesWritten > 0 { + go func() { _ = w.tree.Scan(lwev.Path, true) }() + } + case strings.Contains(lwev.Event, "IN_MOVED_TO"): + go func() { + _ = w.tree.Scan(lwev.Path, true) + _ = w.tree.lookup.(*lookup.Lookup).WarmupIDCache(lwev.Path) + }() + } + } + if err := r.Close(); err != nil { + log.Fatal("failed to close reader:", err) + } +} diff --git a/pkg/storage/fs/posix/tree/inotifywatcher.go b/pkg/storage/fs/posix/tree/inotifywatcher.go new file mode 100644 index 0000000000..200468c68b --- /dev/null +++ 
b/pkg/storage/fs/posix/tree/inotifywatcher.go @@ -0,0 +1,73 @@ +package tree + +import ( + "fmt" + "strings" + + "github.com/cs3org/reva/v2/pkg/storage/fs/posix/lookup" + "github.com/pablodz/inotifywaitgo/inotifywaitgo" +) + +type InotifyWatcher struct { + tree *Tree +} + +func NewInotifyWatcher(tree *Tree) *InotifyWatcher { + return &InotifyWatcher{ + tree: tree, + } +} + +func (iw *InotifyWatcher) Watch(path string) { + events := make(chan inotifywaitgo.FileEvent) + errors := make(chan error) + + go inotifywaitgo.WatchPath(&inotifywaitgo.Settings{ + Dir: path, + FileEvents: events, + ErrorChan: errors, + KillOthers: true, + Options: &inotifywaitgo.Options{ + Recursive: true, + Events: []inotifywaitgo.EVENT{ + inotifywaitgo.CREATE, + inotifywaitgo.MOVED_TO, + inotifywaitgo.CLOSE_WRITE, + }, + Monitor: true, + }, + Verbose: false, + }) + + for { + select { + case event := <-events: + for _, e := range event.Events { + if strings.HasSuffix(event.Filename, ".flock") || strings.HasSuffix(event.Filename, ".mlock") { + continue + } + switch e { + case inotifywaitgo.CREATE: + go func() { _ = iw.tree.Scan(event.Filename, false) }() + case inotifywaitgo.MOVED_TO: + go func() { + _ = iw.tree.Scan(event.Filename, true) + _ = iw.tree.lookup.(*lookup.Lookup).WarmupIDCache(event.Filename) + }() + case inotifywaitgo.CLOSE_WRITE: + go func() { _ = iw.tree.Scan(event.Filename, true) }() + } + } + + case err := <-errors: + switch err.Error() { + case inotifywaitgo.NOT_INSTALLED: + panic("Error: inotifywait is not installed") + case inotifywaitgo.INVALID_EVENT: + // ignore + default: + fmt.Printf("Error: %s\n", err) + } + } + } +} diff --git a/pkg/storage/fs/posix/tree/tree.go b/pkg/storage/fs/posix/tree/tree.go index 88d24bc9a2..15dc8d7eda 100644 --- a/pkg/storage/fs/posix/tree/tree.go +++ b/pkg/storage/fs/posix/tree/tree.go @@ -30,23 +30,28 @@ import ( "strings" "time" + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + 
"go-micro.dev/v4/store" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/trace" + "golang.org/x/sync/errgroup" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/v2/pkg/appctx" "github.com/cs3org/reva/v2/pkg/errtypes" - "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup" + "github.com/cs3org/reva/v2/pkg/logger" + "github.com/cs3org/reva/v2/pkg/storage/fs/posix/lookup" + "github.com/cs3org/reva/v2/pkg/storage/fs/posix/options" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata/prefixes" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" - "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/tree/propagator" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/usermapper" "github.com/cs3org/reva/v2/pkg/utils" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/rs/zerolog/log" - "go-micro.dev/v4/store" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" - "golang.org/x/sync/errgroup" ) var tracer trace.Tracer @@ -62,6 +67,15 @@ type Blobstore interface { Delete(node *node.Node) error } +type Watcher interface { + Watch(path string) +} + +type scanItem struct { + Path string + ForceRescan bool +} + // Tree manages a hierarchical tree type Tree struct { lookup node.PathLookup @@ -70,26 +84,76 @@ type Tree struct { options *options.Options - idCache store.Store + userMapper usermapper.Mapper + idCache store.Store + watcher Watcher + scanQueue chan scanItem + scanDebouncer *ScanDebouncer + + log *zerolog.Logger } // PermissionCheckFunc defined a function used to check resource permissions type PermissionCheckFunc func(rp *provider.ResourcePermissions) bool // New returns a new instance of Tree -func New(lu node.PathLookup, bs Blobstore, o 
*options.Options, cache store.Store) *Tree { - return &Tree{ +func New(lu node.PathLookup, bs Blobstore, um usermapper.Mapper, o *options.Options, cache store.Store) (*Tree, error) { + log := logger.New() + scanQueue := make(chan scanItem) + t := &Tree{ lookup: lu, blobstore: bs, + userMapper: um, options: o, idCache: cache, - propagator: propagator.New(lu, o), + propagator: propagator.New(lu, &o.Options), + scanQueue: scanQueue, + scanDebouncer: NewScanDebouncer(500*time.Millisecond, func(item scanItem) { + scanQueue <- item + }), + log: log, + } + + watchPath := o.WatchPath + var err error + switch o.WatchType { + case "gpfswatchfolder": + t.watcher, err = NewGpfsWatchFolderWatcher(t, strings.Split(o.WatchFolderKafkaBrokers, ",")) + if err != nil { + return nil, err + } + case "gpfsfileauditlogging": + t.watcher, err = NewGpfsFileAuditLoggingWatcher(t, o.WatchPath) + if err != nil { + return nil, err + } + default: + t.watcher = NewInotifyWatcher(t) + watchPath = o.Root } + + // Start watching for fs events and put them into the queue + go t.watcher.Watch(watchPath) + + // Handle queued fs events + go t.workScanQueue() + + return t, nil } // Setup prepares the tree structure func (t *Tree) Setup() error { - return os.MkdirAll(t.options.Root, 0700) + err := os.MkdirAll(t.options.Root, 0700) + if err != nil { + return err + } + + err = os.MkdirAll(t.options.UploadDirectory, 0700) + if err != nil { + return err + } + + return nil } // GetMD returns the metadata of a node in the tree @@ -115,35 +179,48 @@ func (t *Tree) TouchFile(ctx context.Context, n *node.Node, markprocessing bool, return errtypes.AlreadyExists(n.ID) } + parentPath := n.ParentPath() + nodePath := filepath.Join(parentPath, n.Name) + + // lock the meta file + unlock, err := t.lookup.MetadataBackend().Lock(nodePath) + if err != nil { + return err + } + defer func() { + _ = unlock() + }() + if n.ID == "" { n.ID = uuid.New().String() } n.SetType(provider.ResourceType_RESOURCE_TYPE_FILE) - nodePath := 
n.InternalPath() + // Set id in cache + _ = t.lookup.(*lookup.Lookup).CacheID(context.Background(), n.SpaceID, n.ID, nodePath) + if err := os.MkdirAll(filepath.Dir(nodePath), 0700); err != nil { return errors.Wrap(err, "Decomposedfs: error creating node") } - _, err := os.Create(nodePath) + _, err = os.Create(nodePath) if err != nil { return errors.Wrap(err, "Decomposedfs: error creating node") } attributes := n.NodeMetadata(ctx) + attributes[prefixes.IDAttr] = []byte(n.ID) if markprocessing { attributes[prefixes.StatusPrefix] = []byte(node.ProcessingStatus) } + nodeMTime := time.Now() if mtime != "" { - if err := n.SetMtimeString(ctx, mtime); err != nil { - return errors.Wrap(err, "Decomposedfs: could not set mtime") - } - } else { - now := time.Now() - if err := n.SetMtime(ctx, &now); err != nil { - return errors.Wrap(err, "Decomposedfs: could not set mtime") + nodeMTime, err = utils.MTimeToTime(mtime) + if err != nil { + return err } } - err = n.SetXattrsWithContext(ctx, attributes, true) + attributes[prefixes.MTimeAttr] = []byte(nodeMTime.UTC().Format(time.RFC3339Nano)) + err = n.SetXattrsWithContext(ctx, attributes, false) if err != nil { return err } @@ -192,47 +269,9 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) } } - // remove cache entry in any case to avoid inconsistencies - defer func() { _ = t.idCache.Delete(filepath.Join(oldNode.ParentPath(), oldNode.Name)) }() - - // Always target the old node ID for xattr updates. - // The new node id is empty if the target does not exist - // and we need to overwrite the new one when overwriting an existing path. - // are we just renaming (parent stays the same)? 
- if oldNode.ParentID == newNode.ParentID { - - // parentPath := t.lookup.InternalPath(oldNode.SpaceID, oldNode.ParentID) - parentPath := oldNode.ParentPath() - - // rename child - err = os.Rename( - filepath.Join(parentPath, oldNode.Name), - filepath.Join(parentPath, newNode.Name), - ) - if err != nil { - return errors.Wrap(err, "Decomposedfs: could not rename child") - } - - // update name attribute - if err := oldNode.SetXattrString(ctx, prefixes.NameAttr, newNode.Name); err != nil { - return errors.Wrap(err, "Decomposedfs: could not set name attribute") - } - - return t.Propagate(ctx, newNode, 0) - } - // we are moving the node to a new parent, any target has been removed // bring old node to the new parent - // rename child - err = os.Rename( - filepath.Join(oldNode.ParentPath(), oldNode.Name), - filepath.Join(newNode.ParentPath(), newNode.Name), - ) - if err != nil { - return errors.Wrap(err, "Decomposedfs: could not move child") - } - // update target parentid and name attribs := node.Attributes{} attribs.SetString(prefixes.ParentidAttr, newNode.ParentID) @@ -253,6 +292,28 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) sizeDiff = oldNode.Blobsize } + // rename node + err = os.Rename( + filepath.Join(oldNode.ParentPath(), oldNode.Name), + filepath.Join(newNode.ParentPath(), newNode.Name), + ) + if err != nil { + return errors.Wrap(err, "Decomposedfs: could not move child") + } + + // update the id cache + if newNode.ID == "" { + newNode.ID = oldNode.ID + } + _ = t.lookup.(*lookup.Lookup).CacheID(ctx, newNode.SpaceID, newNode.ID, filepath.Join(newNode.ParentPath(), newNode.Name)) + // update id cache for the moved subtree + if oldNode.IsDir(ctx) { + err = t.lookup.(*lookup.Lookup).WarmupIDCache(filepath.Join(newNode.ParentPath(), newNode.Name)) + if err != nil { + return err + } + } + // TODO inefficient because we might update several nodes twice, only propagate unchanged nodes? 
// collect in a list, then only stat each node once // also do this in a go routine ... webdav should check the etag async @@ -268,18 +329,6 @@ func (t *Tree) Move(ctx context.Context, oldNode *node.Node, newNode *node.Node) return nil } -func readChildNodeFromLink(ctx context.Context, path string) (string, error) { - _, span := tracer.Start(ctx, "readChildNodeFromLink") - defer span.End() - link, err := os.Readlink(path) - if err != nil { - return "", err - } - nodeID := strings.TrimLeft(link, "/.") - nodeID = strings.ReplaceAll(nodeID, "/", "") - return nodeID, nil -} - // ListFolder lists the content of a folder node func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, error) { ctx, span := tracer.Start(ctx, "ListFolder") @@ -329,22 +378,27 @@ func (t *Tree) ListFolder(ctx context.Context, n *node.Node) ([]*node.Node, erro // Spawn workers that'll concurrently work the queue for i := 0; i < numWorkers; i++ { g.Go(func() error { - var err error + // switch user if necessary + spaceGID, ok := ctx.Value(decomposedfs.CtxKeySpaceGID).(uint32) + if ok { + unscope, err := t.userMapper.ScopeUserByIds(-1, int(spaceGID)) + if err != nil { + return errors.Wrap(err, "failed to scope user") + } + defer func() { _ = unscope() }() + } + for name := range work { path := filepath.Join(dir, name) - nodeID := getNodeIDFromCache(ctx, path, t.idCache) - if nodeID == "" { - nodeID, err = readChildNodeFromLink(ctx, path) - if err != nil { - return err - } - err = storeNodeIDInCache(ctx, path, nodeID, t.idCache) - if err != nil { - return err + nodeID, err := t.lookup.MetadataBackend().Get(ctx, path, prefixes.IDAttr) + if err != nil { + if metadata.IsAttrUnset(err) { + continue } + return err } - child, err := node.ReadNode(ctx, t.lookup, n.SpaceID, nodeID, false, n.SpaceRoot, true) + child, err := node.ReadNode(ctx, t.lookup, n.SpaceID, string(nodeID), false, n.SpaceRoot, true) if err != nil { return err } @@ -384,7 +438,12 @@ func (t *Tree) ListFolder(ctx 
context.Context, n *node.Node) ([]*node.Node, erro // Delete deletes a node in the tree by moving it to the trash func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { - path := filepath.Join(n.ParentPath(), n.Name) + path := n.InternalPath() + + if !strings.HasPrefix(path, t.options.Root) { + return errtypes.InternalError("invalid internal path") + } + // remove entry from cache immediately to avoid inconsistencies defer func() { _ = t.idCache.Delete(path) }() @@ -392,19 +451,7 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { if deletingSharedResource != nil && deletingSharedResource.(bool) { src := filepath.Join(n.ParentPath(), n.Name) - return os.Remove(src) - } - - // get the original path - origin, err := t.lookup.Path(ctx, n, node.NoCheck) - if err != nil { - return - } - - // set origin location in metadata - nodePath := n.InternalPath() - if err := n.SetXattrString(ctx, prefixes.TrashOriginAttr, origin); err != nil { - return err + return os.RemoveAll(src) } var sizeDiff int64 @@ -418,53 +465,11 @@ func (t *Tree) Delete(ctx context.Context, n *node.Node) (err error) { sizeDiff = -n.Blobsize } - deletionTime := time.Now().UTC().Format(time.RFC3339Nano) - - // Prepare the trash - trashLink := filepath.Join(t.options.Root, "spaces", lookup.Pathify(n.SpaceRoot.ID, 1, 2), "trash", lookup.Pathify(n.ID, 4, 2)) - if err := os.MkdirAll(filepath.Dir(trashLink), 0700); err != nil { - // Roll back changes - _ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr, true) - return err - } - - // FIXME can we just move the node into the trash dir? instead of adding another symlink and appending a trash timestamp? - // can we just use the mtime as the trash time? 
- // TODO store a trashed by userid - - // first make node appear in the space trash - // parent id and name are stored as extended attributes in the node itself - err = os.Symlink("../../../../../nodes/"+lookup.Pathify(n.ID, 4, 2)+node.TrashIDDelimiter+deletionTime, trashLink) - if err != nil { - // Roll back changes - _ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr, true) - return - } - - // at this point we have a symlink pointing to a non existing destination, which is fine - - // rename the trashed node so it is not picked up when traversing up the tree and matches the symlink - trashPath := nodePath + node.TrashIDDelimiter + deletionTime - err = os.Rename(nodePath, trashPath) - if err != nil { - // To roll back changes - // TODO remove symlink - // Roll back changes - _ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr, true) - return - } - err = t.lookup.MetadataBackend().Rename(nodePath, trashPath) - if err != nil { - _ = n.RemoveXattr(ctx, prefixes.TrashOriginAttr, true) - _ = os.Rename(trashPath, nodePath) - return - } - // Remove lock file if it exists _ = os.Remove(n.LockFilePath()) // finally remove the entry from the parent dir - if err = os.Remove(path); err != nil { + if err = os.RemoveAll(path); err != nil { // To roll back changes // TODO revert the rename // TODO remove symlink @@ -729,18 +734,37 @@ func (t *Tree) InitNewNode(ctx context.Context, n *node.Node, fsize uint64) (met func (t *Tree) createDirNode(ctx context.Context, n *node.Node) (err error) { ctx, span := tracer.Start(ctx, "createDirNode") defer span.End() + + idcache := t.lookup.(*lookup.Lookup).IDCache // create a directory node - nodePath := n.InternalPath() - if err := os.MkdirAll(nodePath, 0700); err != nil { + parentPath, ok := idcache.Get(ctx, n.SpaceID, n.ParentID) + if !ok { + return errtypes.NotFound(n.ParentID) + } + path := filepath.Join(parentPath, n.Name) + + // lock the meta file + unlock, err := t.lookup.MetadataBackend().Lock(path) + if err != nil { + return err + } + 
defer func() { + _ = unlock() + }() + + if err := os.MkdirAll(path, 0700); err != nil { return errors.Wrap(err, "Decomposedfs: error creating node") } + _ = idcache.Set(ctx, n.SpaceID, n.ID, path) + attributes := n.NodeMetadata(ctx) + attributes[prefixes.IDAttr] = []byte(n.ID) attributes[prefixes.TreesizeAttr] = []byte("0") // initialize as empty, TODO why bother? if it is not set we could treat it as 0? if t.options.TreeTimeAccounting || t.options.TreeSizeAccounting { attributes[prefixes.PropagationAttr] = []byte("1") // mark the node for propagation } - return n.SetXattrsWithContext(ctx, attributes, true) + return n.SetXattrsWithContext(ctx, attributes, false) } var nodeIDRegep = regexp.MustCompile(`.*/nodes/([^.]*).*`) @@ -814,22 +838,3 @@ func (t *Tree) readRecycleItem(ctx context.Context, spaceID, key, path string) ( return } - -func getNodeIDFromCache(ctx context.Context, path string, cache store.Store) string { - _, span := tracer.Start(ctx, "getNodeIDFromCache") - defer span.End() - recs, err := cache.Read(path) - if err == nil && len(recs) > 0 { - return string(recs[0].Value) - } - return "" -} - -func storeNodeIDInCache(ctx context.Context, path string, nodeID string, cache store.Store) error { - _, span := tracer.Start(ctx, "storeNodeIDInCache") - defer span.End() - return cache.Write(&store.Record{ - Key: path, - Value: []byte(nodeID), - }) -} diff --git a/pkg/storage/fs/posix/tree/tree_suite_test.go b/pkg/storage/fs/posix/tree/tree_suite_test.go new file mode 100644 index 0000000000..61992539e0 --- /dev/null +++ b/pkg/storage/fs/posix/tree/tree_suite_test.go @@ -0,0 +1,13 @@ +package tree_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestTree(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Tree Suite") +} diff --git a/pkg/storage/fs/posix/tree/tree_test.go b/pkg/storage/fs/posix/tree/tree_test.go new file mode 100644 index 0000000000..4b85cd44eb --- /dev/null +++ b/pkg/storage/fs/posix/tree/tree_test.go @@ -0,0 +1,369 @@ +package tree_test + +import ( + "crypto/rand" + "log" + "os" + "strings" + "time" + + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + helpers "github.com/cs3org/reva/v2/pkg/storage/fs/posix/testhelpers" + "github.com/shirou/gopsutil/process" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func generateRandomString(length int) (string, error) { + const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + charsetLength := len(charset) + + randomBytes := make([]byte, length) + _, err := rand.Read(randomBytes) + if err != nil { + return "", err + } + + for i := 0; i < length; i++ { + randomBytes[i] = charset[int(randomBytes[i])%charsetLength] + } + + return string(randomBytes), nil +} + +var ( + env *helpers.TestEnv + + root string +) + +var _ = SynchronizedBeforeSuite(func() { + var err error + env, err = helpers.NewTestEnv(nil) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func() bool { + // Get all running processes + processes, err := process.Processes() + if err != nil { + panic("could not get processes: " + err.Error()) + } + + // Search for the process named "inotifywait" + for _, p := range processes { + name, err := p.Name() + if err != nil { + log.Println(err) + continue + } + + if strings.Contains(name, "inotifywait") { + return true + } + } + + // Give it some time to setup the watches + time.Sleep(2 * time.Second) + return false + }).Should(BeTrue()) +}, func() {}) + +var _ = SynchronizedAfterSuite(func() {}, func() { + if env != nil { + env.Cleanup() + } +}) + +var _ = Describe("Tree", func() { + var ( + subtree string + ) + + BeforeEach(func() { + 
SetDefaultEventuallyTimeout(15 * time.Second) + + var err error + subtree, err = generateRandomString(10) + Expect(err).ToNot(HaveOccurred()) + subtree = "/" + subtree + root = env.Root + "/users/" + env.Owner.Username + subtree + Expect(os.Mkdir(root, 0700)).To(Succeed()) + + Eventually(func(g Gomega) { + n, err := env.Lookup.NodeFromResource(env.Ctx, &provider.Reference{ + ResourceId: env.SpaceRootRes, + Path: subtree, + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(n.Exists).To(BeTrue()) + }).Should(Succeed()) + }) + + Describe("assimilation", func() { + Describe("of files", func() { + It("handles new files", func() { + _, err := os.Create(root + "/assimilated.txt") + Expect(err).ToNot(HaveOccurred()) + + Eventually(func(g Gomega) { + n, err := env.Lookup.NodeFromResource(env.Ctx, &provider.Reference{ + ResourceId: env.SpaceRootRes, + Path: subtree + "/assimilated.txt", + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(n).ToNot(BeNil()) + g.Expect(n.Type(env.Ctx)).To(Equal(provider.ResourceType_RESOURCE_TYPE_FILE)) + g.Expect(n.ID).ToNot(BeEmpty()) + g.Expect(n.Blobsize).To(Equal(int64(0))) + }).ProbeEvery(200 * time.Millisecond).Should(Succeed()) + }) + + It("handles changed files", func() { + // Create empty file + _, err := os.Create(root + "/changed.txt") + Expect(err).ToNot(HaveOccurred()) + + Eventually(func(g Gomega) { + n, err := env.Lookup.NodeFromResource(env.Ctx, &provider.Reference{ + ResourceId: env.SpaceRootRes, + Path: subtree + "/changed.txt", + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(n).ToNot(BeNil()) + g.Expect(n.ID).ToNot(BeEmpty()) + g.Expect(n.Blobsize).To(Equal(int64(0))) + }).ProbeEvery(200 * time.Millisecond).Should(Succeed()) + + // Change file content + Expect(os.WriteFile(root+"/changed.txt", []byte("hello world"), 0600)).To(Succeed()) + + Eventually(func(g Gomega) { + n, err := env.Lookup.NodeFromResource(env.Ctx, &provider.Reference{ + ResourceId: env.SpaceRootRes, + Path: subtree + "/changed.txt", + }) + 
g.Expect(err).ToNot(HaveOccurred()) + g.Expect(n).ToNot(BeNil()) + g.Expect(n.Type(env.Ctx)).To(Equal(provider.ResourceType_RESOURCE_TYPE_FILE)) + g.Expect(n.ID).ToNot(BeEmpty()) + g.Expect(n.Blobsize).To(Equal(int64(11))) + }).Should(Succeed()) + }) + + It("handles deleted files", func() { + _, err := os.Create(root + "/deleted.txt") + Expect(err).ToNot(HaveOccurred()) + + Eventually(func(g Gomega) { + n, err := env.Lookup.NodeFromResource(env.Ctx, &provider.Reference{ + ResourceId: env.SpaceRootRes, + Path: subtree + "/deleted.txt", + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(n).ToNot(BeNil()) + g.Expect(n.Type(env.Ctx)).To(Equal(provider.ResourceType_RESOURCE_TYPE_FILE)) + g.Expect(n.ID).ToNot(BeEmpty()) + }).Should(Succeed()) + + Expect(os.Remove(root + "/deleted.txt")).To(Succeed()) + + Eventually(func(g Gomega) { + n, err := env.Lookup.NodeFromResource(env.Ctx, &provider.Reference{ + ResourceId: env.SpaceRootRes, + Path: subtree + "/deleted.txt", + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(n.Exists).To(BeFalse()) + }).Should(Succeed()) + }) + + It("handles moved files", func() { + // Create empty file + _, err := os.Create(root + "/original.txt") + Expect(err).ToNot(HaveOccurred()) + + fileID := "" + // Wait for the file to be indexed + Eventually(func(g Gomega) { + n, err := env.Lookup.NodeFromResource(env.Ctx, &provider.Reference{ + ResourceId: env.SpaceRootRes, + Path: subtree + "/original.txt", + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(n).ToNot(BeNil()) + g.Expect(n.Type(env.Ctx)).To(Equal(provider.ResourceType_RESOURCE_TYPE_FILE)) + g.Expect(n.ID).ToNot(BeEmpty()) + fileID = n.ID + g.Expect(n.Blobsize).To(Equal(int64(0))) + }).Should(Succeed()) + + // Move file + Expect(os.Rename(root+"/original.txt", root+"/moved.txt")).To(Succeed()) + + // Wait for the file to be indexed + Eventually(func(g Gomega) { + n, err := env.Lookup.NodeFromResource(env.Ctx, &provider.Reference{ + ResourceId: env.SpaceRootRes, + Path: subtree + 
"/original.txt", + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(n.Exists).To(BeFalse()) + }).Should(Succeed()) + + Eventually(func(g Gomega) { + n, err := env.Lookup.NodeFromResource(env.Ctx, &provider.Reference{ + ResourceId: env.SpaceRootRes, + Path: subtree + "/moved.txt", + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(n).ToNot(BeNil()) + g.Expect(n.Type(env.Ctx)).To(Equal(provider.ResourceType_RESOURCE_TYPE_FILE)) + g.Expect(n.ID).To(Equal(fileID)) + g.Expect(n.Blobsize).To(Equal(int64(0))) + }).Should(Succeed()) + }) + }) + + Describe("of directories", func() { + It("handles new directories", func() { + Expect(os.Mkdir(root+"/assimilated", 0700)).To(Succeed()) + + Eventually(func(g Gomega) { + n, err := env.Lookup.NodeFromResource(env.Ctx, &provider.Reference{ + ResourceId: env.SpaceRootRes, + Path: subtree + "/assimilated", + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(n).ToNot(BeNil()) + g.Expect(n.Type(env.Ctx)).To(Equal(provider.ResourceType_RESOURCE_TYPE_CONTAINER)) + g.Expect(n.ID).ToNot(BeEmpty()) + }).Should(Succeed()) + }) + + It("handles files in directories", func() { + Expect(os.Mkdir(root+"/assimilated", 0700)).To(Succeed()) + time.Sleep(100 * time.Millisecond) // Give it some time to settle down + Expect(os.WriteFile(root+"/assimilated/file.txt", []byte("hello world"), 0600)).To(Succeed()) + + Eventually(func(g Gomega) { + n, err := env.Lookup.NodeFromResource(env.Ctx, &provider.Reference{ + ResourceId: env.SpaceRootRes, + Path: subtree + "/assimilated", + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(n).ToNot(BeNil()) + g.Expect(n.Type(env.Ctx)).To(Equal(provider.ResourceType_RESOURCE_TYPE_CONTAINER)) + g.Expect(n.ID).ToNot(BeEmpty()) + g.Expect(n.GetTreeSize(env.Ctx)).To(Equal(uint64(11))) + }).Should(Succeed()) + }) + + It("handles deleted directories", func() { + Expect(os.Mkdir(root+"/deleted", 0700)).To(Succeed()) + + Eventually(func(g Gomega) { + n, err := env.Lookup.NodeFromResource(env.Ctx, 
&provider.Reference{ + ResourceId: env.SpaceRootRes, + Path: subtree + "/deleted", + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(n).ToNot(BeNil()) + g.Expect(n.Type(env.Ctx)).To(Equal(provider.ResourceType_RESOURCE_TYPE_CONTAINER)) + g.Expect(n.ID).ToNot(BeEmpty()) + }).Should(Succeed()) + + Expect(os.Remove(root + "/deleted")).To(Succeed()) + + Eventually(func(g Gomega) { + n, err := env.Lookup.NodeFromResource(env.Ctx, &provider.Reference{ + ResourceId: env.SpaceRootRes, + Path: subtree + "/deleted", + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(n.Exists).To(BeFalse()) + }).Should(Succeed()) + }) + + It("handles moved directories", func() { + Expect(os.Mkdir(root+"/original", 0700)).To(Succeed()) + time.Sleep(100 * time.Millisecond) // Give it some time to settle down + Expect(os.WriteFile(root+"/original/file.txt", []byte("hello world"), 0600)).To(Succeed()) + + dirId := "" + Eventually(func(g Gomega) { + n, err := env.Lookup.NodeFromResource(env.Ctx, &provider.Reference{ + ResourceId: env.SpaceRootRes, + Path: subtree + "/original", + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(n).ToNot(BeNil()) + g.Expect(n.Type(env.Ctx)).To(Equal(provider.ResourceType_RESOURCE_TYPE_CONTAINER)) + g.Expect(n.ID).ToNot(BeEmpty()) + g.Expect(n.GetTreeSize(env.Ctx)).To(Equal(uint64(11))) + dirId = n.ID + }).Should(Succeed()) + + Expect(os.Rename(root+"/original", root+"/moved")).To(Succeed()) + + Eventually(func(g Gomega) { + n, err := env.Lookup.NodeFromResource(env.Ctx, &provider.Reference{ + ResourceId: env.SpaceRootRes, + Path: subtree + "/original", + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(n.Exists).To(BeFalse()) + }).Should(Succeed()) + + Eventually(func(g Gomega) { + n, err := env.Lookup.NodeFromResource(env.Ctx, &provider.Reference{ + ResourceId: env.SpaceRootRes, + Path: subtree + "/moved", + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(n).ToNot(BeNil()) + g.Expect(n.Exists).To(BeTrue()) + 
g.Expect(n.Type(env.Ctx)).To(Equal(provider.ResourceType_RESOURCE_TYPE_CONTAINER)) + g.Expect(n.ID).To(Equal(dirId)) + g.Expect(n.GetTreeSize(env.Ctx)).To(Equal(uint64(11))) + }).Should(Succeed()) + }) + }) + }) + + Describe("propagation", func() { + It("propagates new files in a directory", func() { + Expect(os.Mkdir(root+"/assimilated", 0700)).To(Succeed()) + time.Sleep(100 * time.Millisecond) // Give it some time to settle down + Expect(os.WriteFile(root+"/assimilated/file.txt", []byte("hello world"), 0600)).To(Succeed()) + + Eventually(func(g Gomega) { + n, err := env.Lookup.NodeFromResource(env.Ctx, &provider.Reference{ + ResourceId: env.SpaceRootRes, + + Path: subtree + "/assimilated", + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(n).ToNot(BeNil()) + g.Expect(n.Type(env.Ctx)).To(Equal(provider.ResourceType_RESOURCE_TYPE_CONTAINER)) + g.Expect(n.ID).ToNot(BeEmpty()) + g.Expect(n.GetTreeSize(env.Ctx)).To(Equal(uint64(11))) + }).Should(Succeed()) + + Expect(os.WriteFile(root+"/assimilated/file2.txt", []byte("hello world"), 0600)).To(Succeed()) + + Eventually(func(g Gomega) { + n, err := env.Lookup.NodeFromResource(env.Ctx, &provider.Reference{ + ResourceId: env.SpaceRootRes, + + Path: subtree + "/assimilated", + }) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(n).ToNot(BeNil()) + g.Expect(n.Type(env.Ctx)).To(Equal(provider.ResourceType_RESOURCE_TYPE_CONTAINER)) + g.Expect(n.ID).ToNot(BeEmpty()) + g.Expect(n.GetTreeSize(env.Ctx)).To(Equal(uint64(22))) + }).Should(Succeed()) + }) + }) +}) diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 70ceb5134a..c3917526e7 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -23,6 +23,8 @@ import ( "io" "net/url" + tusd "github.com/tus/tusd/pkg/handler" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" registry "github.com/cs3org/go-cs3apis/cs3/storage/registry/v1beta1" ) @@ -71,6 +73,15 @@ type FS interface { DeleteStorageSpace(ctx context.Context, req 
*provider.DeleteStorageSpaceRequest) error } +// UnscopeFunc is a function that unscopes a user +type UnscopeFunc func() + +// Composable is the interface that a struct needs to implement +// to be composable, so that it can support the TUS methods +type ComposableFS interface { + UseIn(composer *tusd.StoreComposer) +} + // Registry is the interface that storage registries implement // for discovering storage providers type Registry interface { diff --git a/pkg/storage/utils/decomposedfs/aspects/aspects.go b/pkg/storage/utils/decomposedfs/aspects/aspects.go index c920c88b8f..3aeb542c72 100644 --- a/pkg/storage/utils/decomposedfs/aspects/aspects.go +++ b/pkg/storage/utils/decomposedfs/aspects/aspects.go @@ -22,6 +22,7 @@ import ( "github.com/cs3org/reva/v2/pkg/events" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/permissions" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/usermapper" ) // Aspects holds dependencies for handling aspects of the decomposedfs @@ -31,4 +32,5 @@ type Aspects struct { Permissions permissions.Permissions EventStream events.Stream DisableVersioning bool + UserMapper usermapper.Mapper } diff --git a/pkg/storage/utils/decomposedfs/decomposedfs.go b/pkg/storage/utils/decomposedfs/decomposedfs.go index 6bfc651478..fd8e9696d1 100644 --- a/pkg/storage/utils/decomposedfs/decomposedfs.go +++ b/pkg/storage/utils/decomposedfs/decomposedfs.go @@ -51,6 +51,7 @@ import ( "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/spaceidindex" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/tree" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/upload" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/usermapper" "github.com/cs3org/reva/v2/pkg/storage/utils/filelocks" "github.com/cs3org/reva/v2/pkg/storage/utils/templates" "github.com/cs3org/reva/v2/pkg/storagespace" @@ -65,6 +66,12 @@ import ( "golang.org/x/sync/errgroup" ) +type CtxKey 
int + +const ( + CtxKeySpaceGID CtxKey = iota +) + var ( tracer trace.Tracer @@ -105,6 +112,7 @@ type Decomposedfs struct { tp node.Tree o *options.Options p permissions.Permissions + um usermapper.Mapper chunkHandler *chunking.ChunkHandler stream events.Stream sessionStore SessionStore @@ -200,19 +208,25 @@ func New(o *options.Options, aspects aspects.Aspects) (storage.FS, error) { return nil, err } + // set a null usermapper if we don't have one + if aspects.UserMapper == nil { + aspects.UserMapper = &usermapper.NullMapper{} + } + fs := &Decomposedfs{ tp: aspects.Tree, lu: aspects.Lookup, o: o, p: aspects.Permissions, + um: aspects.UserMapper, chunkHandler: chunking.NewChunkHandler(filepath.Join(o.Root, "uploads")), stream: aspects.EventStream, UserCache: ttlcache.NewCache(), userSpaceIndex: userSpaceIndex, groupSpaceIndex: groupSpaceIndex, spaceTypeIndex: spaceTypeIndex, - sessionStore: upload.NewSessionStore(aspects.Lookup, aspects.Tree, o.Root, aspects.EventStream, o.AsyncFileUploads, o.Tokens, aspects.DisableVersioning), } + fs.sessionStore = upload.NewSessionStore(fs, aspects, o.Root, o.AsyncFileUploads, o.Tokens) if o.AsyncFileUploads { if fs.stream == nil { @@ -884,7 +898,7 @@ func (fs *Decomposedfs) GetMD(ctx context.Context, ref *provider.Reference, mdKe } } if addSpace { - if md.Space, err = fs.storageSpaceFromNode(ctx, node, true); err != nil { + if md.Space, err = fs.StorageSpaceFromNode(ctx, node, true); err != nil { return nil, err } } diff --git a/pkg/storage/utils/decomposedfs/grants.go b/pkg/storage/utils/decomposedfs/grants.go index 5990a5146c..227a2d5d49 100644 --- a/pkg/storage/utils/decomposedfs/grants.go +++ b/pkg/storage/utils/decomposedfs/grants.go @@ -20,7 +20,6 @@ package decomposedfs import ( "context" - "os" "path/filepath" "strings" @@ -29,11 +28,11 @@ import ( ctxpkg "github.com/cs3org/reva/v2/pkg/ctx" "github.com/cs3org/reva/v2/pkg/errtypes" "github.com/cs3org/reva/v2/pkg/storage/utils/ace" + 
"github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata/prefixes" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/v2/pkg/storagespace" "github.com/cs3org/reva/v2/pkg/utils" - "github.com/rogpeppe/go-internal/lockedfile" ) // DenyGrant denies access to a resource. @@ -80,7 +79,9 @@ func (fs *Decomposedfs) AddGrant(ctx context.Context, ref *provider.Reference, g if err != nil { return err } - defer unlockFunc() + defer func() { + _ = unlockFunc() + }() if grant != nil { return errtypes.AlreadyExists(filepath.Join(grantNode.ParentID, grantNode.Name)) @@ -176,7 +177,9 @@ func (fs *Decomposedfs) RemoveGrant(ctx context.Context, ref *provider.Reference if err != nil { return err } - defer unlockFunc() + defer func() { + _ = unlockFunc() + }() if grant == nil { return errtypes.NotFound("grant not found") @@ -237,7 +240,9 @@ func (fs *Decomposedfs) UpdateGrant(ctx context.Context, ref *provider.Reference if err != nil { return err } - defer unlockFunc() + defer func() { + _ = unlockFunc() + }() if grant == nil { // grant not found @@ -264,9 +269,7 @@ func (fs *Decomposedfs) UpdateGrant(ctx context.Context, ref *provider.Reference } // checks if the given grant exists and returns it. 
Nil grant means it doesn't exist -func (fs *Decomposedfs) loadGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (*node.Node, func(), *provider.Grant, error) { - var unlockFunc func() - +func (fs *Decomposedfs) loadGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (*node.Node, metadata.UnlockFunc, *provider.Grant, error) { n, err := fs.lu.NodeFromResource(ctx, ref) if err != nil { return nil, nil, nil, err @@ -275,11 +278,11 @@ func (fs *Decomposedfs) loadGrant(ctx context.Context, ref *provider.Reference, return nil, nil, nil, errtypes.NotFound(filepath.Join(n.ParentID, n.Name)) } - f, err := lockedfile.OpenFile(fs.lu.MetadataBackend().LockfilePath(n.InternalPath()), os.O_RDWR|os.O_CREATE, 0600) + // lock the metadata file + unlockFunc, err := fs.lu.MetadataBackend().Lock(n.InternalPath()) if err != nil { return nil, nil, nil, err } - unlockFunc = func() { f.Close() } grants, err := n.ListGrants(ctx) if err != nil { diff --git a/pkg/storage/utils/decomposedfs/metadata/errors.go b/pkg/storage/utils/decomposedfs/metadata/errors.go index 628910c4ed..c87cbc1d7e 100644 --- a/pkg/storage/utils/decomposedfs/metadata/errors.go +++ b/pkg/storage/utils/decomposedfs/metadata/errors.go @@ -23,6 +23,7 @@ import ( "os" "syscall" + "github.com/cs3org/reva/v2/pkg/errtypes" "github.com/pkg/errors" "github.com/pkg/xattr" ) @@ -30,6 +31,9 @@ import ( // IsNotExist checks if there is a os not exists error buried inside the xattr error, // as we cannot just use os.IsNotExist(). 
func IsNotExist(err error) bool { + if _, ok := err.(errtypes.IsNotFound); ok { + return true + } if os.IsNotExist(errors.Cause(err)) { return true } @@ -59,5 +63,10 @@ func IsNotDir(err error) bool { return serr == syscall.ENOTDIR } } + if xerr, ok := errors.Cause(err).(*xattr.Error); ok { + if serr, ok2 := xerr.Err.(syscall.Errno); ok2 { + return serr == syscall.ENOTDIR + } + } return false } diff --git a/pkg/storage/utils/decomposedfs/spaces.go b/pkg/storage/utils/decomposedfs/spaces.go index 77702ec57d..c8e644ee12 100644 --- a/pkg/storage/utils/decomposedfs/spaces.go +++ b/pkg/storage/utils/decomposedfs/spaces.go @@ -76,6 +76,24 @@ func (fs *Decomposedfs) CreateStorageSpace(ctx context.Context, req *provider.Cr spaceID = reqSpaceID } + // Check if space already exists + rootPath := "" + switch req.Type { + case _spaceTypePersonal: + if fs.o.PersonalSpacePathTemplate != "" { + rootPath = filepath.Join(fs.o.Root, templates.WithUser(u, fs.o.PersonalSpacePathTemplate)) + } + default: + if fs.o.GeneralSpacePathTemplate != "" { + rootPath = filepath.Join(fs.o.Root, templates.WithSpacePropertiesAndUser(u, req.Type, req.Name, spaceID, fs.o.GeneralSpacePathTemplate)) + } + } + if rootPath != "" { + if _, err := os.Stat(rootPath); err == nil { + return nil, errtypes.AlreadyExists("decomposedfs: spaces: space already exists") + } + } + description := utils.ReadPlainFromOpaque(req.Opaque, "description") alias := utils.ReadPlainFromOpaque(req.Opaque, "spaceAlias") if alias == "" { @@ -97,20 +115,18 @@ func (fs *Decomposedfs) CreateStorageSpace(ctx context.Context, req *provider.Cr // create a directory node root.SetType(provider.ResourceType_RESOURCE_TYPE_CONTAINER) - rootPath := root.InternalPath() - switch req.Type { - case _spaceTypePersonal: - if fs.o.PersonalSpacePathTemplate != "" { - rootPath = filepath.Join(fs.o.Root, templates.WithUser(u, fs.o.PersonalSpacePathTemplate)) - } - default: - if fs.o.GeneralSpacePathTemplate != "" { - rootPath = filepath.Join(fs.o.Root, 
templates.WithSpacePropertiesAndUser(u, req.Type, req.Name, spaceID, fs.o.GeneralSpacePathTemplate)) - } + if rootPath == "" { + rootPath = root.InternalPath() + } + + // set 755 permissions for the base dir + if err := os.MkdirAll(filepath.Dir(rootPath), 0755); err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("Decomposedfs: error creating spaces base dir %s", filepath.Dir(rootPath))) } - if err := os.MkdirAll(rootPath, 0700); err != nil { - return nil, errors.Wrap(err, "Decomposedfs: error creating node") + // 770 permissions for the space + if err := os.MkdirAll(rootPath, 0770); err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("Decomposedfs: error creating space %s", rootPath)) } // Store id in cache @@ -199,7 +215,7 @@ func (fs *Decomposedfs) CreateStorageSpace(ctx context.Context, req *provider.Cr } } - space, err := fs.storageSpaceFromNode(ctx, root, true) + space, err := fs.StorageSpaceFromNode(ctx, root, true) if err != nil { return nil, err } @@ -289,7 +305,7 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide // return empty list return spaces, nil } - space, err := fs.storageSpaceFromNode(ctx, n, checkNodePermissions) + space, err := fs.StorageSpaceFromNode(ctx, n, checkNodePermissions) if err != nil { return nil, err } @@ -432,7 +448,7 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide continue } - space, err := fs.storageSpaceFromNode(ctx, n, checkNodePermissions) + space, err := fs.StorageSpaceFromNode(ctx, n, checkNodePermissions) if err != nil { switch err.(type) { case errtypes.IsPermissionDenied: @@ -485,7 +501,7 @@ func (fs *Decomposedfs) ListStorageSpaces(ctx context.Context, filter []*provide return nil, err } if n.Exists { - space, err := fs.storageSpaceFromNode(ctx, n, checkNodePermissions) + space, err := fs.StorageSpaceFromNode(ctx, n, checkNodePermissions) if err != nil { return nil, err } @@ -670,7 +686,7 @@ func (fs *Decomposedfs) UpdateStorageSpace(ctx 
context.Context, req *provider.Up } // send back the updated data from the storage - updatedSpace, err := fs.storageSpaceFromNode(ctx, spaceNode, false) + updatedSpace, err := fs.StorageSpaceFromNode(ctx, spaceNode, false) if err != nil { return nil, err } @@ -779,7 +795,7 @@ func (fs *Decomposedfs) linkStorageSpaceType(ctx context.Context, spaceType, spa return fs.spaceTypeIndex.Add(spaceType, spaceID, target) } -func (fs *Decomposedfs) storageSpaceFromNode(ctx context.Context, n *node.Node, checkPermissions bool) (*provider.StorageSpace, error) { +func (fs *Decomposedfs) StorageSpaceFromNode(ctx context.Context, n *node.Node, checkPermissions bool) (*provider.StorageSpace, error) { user := ctxpkg.ContextMustGetUser(ctx) if checkPermissions { rp, err := fs.p.AssemblePermissions(ctx, n) diff --git a/pkg/storage/utils/decomposedfs/upload.go b/pkg/storage/utils/decomposedfs/upload.go index 191556c44a..95758fdbfb 100644 --- a/pkg/storage/utils/decomposedfs/upload.go +++ b/pkg/storage/utils/decomposedfs/upload.go @@ -24,6 +24,7 @@ import ( "os" "path/filepath" "strings" + "syscall" "time" "github.com/google/uuid" @@ -180,6 +181,13 @@ func (fs *Decomposedfs) InitiateUpload(ctx context.Context, ref *provider.Refere session.SetStorageValue("SpaceRoot", n.SpaceRoot.ID) // TODO SpaceRoot -> SpaceID session.SetStorageValue("SpaceOwnerOrManager", n.SpaceOwnerOrManager(ctx).GetOpaqueId()) // TODO needed for what? 
+ // remember the gid of the space + fi, err := os.Stat(n.SpaceRoot.InternalPath()) + if err != nil { + return nil, err + } + session.SetStorageValue("SpaceGid", fmt.Sprintf("%d", (fi.Sys().(*syscall.Stat_t).Gid))) + iid, _ := ctxpkg.ContextGetInitiator(ctx) session.SetMetadata("initiatorid", iid) @@ -298,18 +306,20 @@ func (fs *Decomposedfs) InitiateUpload(ctx context.Context, ref *provider.Refere session.SetStorageValue("LogLevel", log.GetLevel().String()) log.Debug().Interface("session", session).Msg("Decomposedfs: built session info") - // Create binary file in the upload folder with no content - // It will be used when determining the current offset of an upload - err = session.TouchBin() - if err != nil { - return nil, err - } - err = session.Persist(ctx) + err = fs.um.RunInBaseScope(func() error { + // Create binary file in the upload folder with no content + // It will be used when determining the current offset of an upload + err := session.TouchBin() + if err != nil { + return err + } + + return session.Persist(ctx) + }) if err != nil { return nil, err } - metrics.UploadSessionsInitiated.Inc() if uploadLength == 0 { @@ -345,7 +355,13 @@ func (fs *Decomposedfs) NewUpload(ctx context.Context, info tusd.FileInfo) (tusd // GetUpload returns the Upload for the given upload id func (fs *Decomposedfs) GetUpload(ctx context.Context, id string) (tusd.Upload, error) { - return fs.sessionStore.Get(ctx, id) + var ul tusd.Upload + var err error + _ = fs.um.RunInBaseScope(func() error { + ul, err = fs.sessionStore.Get(ctx, id) + return nil + }) + return ul, err } // ListUploadSessions returns the upload sessions for the given filter diff --git a/pkg/storage/utils/decomposedfs/upload/store.go b/pkg/storage/utils/decomposedfs/upload/store.go index b5b1aa9fdb..f30fee224f 100644 --- a/pkg/storage/utils/decomposedfs/upload/store.go +++ b/pkg/storage/utils/decomposedfs/upload/store.go @@ -34,10 +34,13 @@ import ( "github.com/cs3org/reva/v2/pkg/appctx" 
"github.com/cs3org/reva/v2/pkg/errtypes" "github.com/cs3org/reva/v2/pkg/events" + "github.com/cs3org/reva/v2/pkg/storage" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/aspects" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata/prefixes" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/options" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/usermapper" "github.com/google/uuid" "github.com/pkg/errors" "github.com/rogpeppe/go-internal/lockedfile" @@ -53,8 +56,10 @@ type PermissionsChecker interface { // OcisStore manages upload sessions type OcisStore struct { + fs storage.FS lu node.PathLookup tp node.Tree + um usermapper.Mapper root string pub events.Publisher async bool @@ -63,15 +68,17 @@ type OcisStore struct { } // NewSessionStore returns a new OcisStore -func NewSessionStore(lu node.PathLookup, tp node.Tree, root string, pub events.Publisher, async bool, tknopts options.TokenOptions, disableVersioning bool) *OcisStore { +func NewSessionStore(fs storage.FS, aspects aspects.Aspects, root string, async bool, tknopts options.TokenOptions) *OcisStore { return &OcisStore{ - lu: lu, - tp: tp, + fs: fs, + lu: aspects.Lookup, + tp: aspects.Tree, root: root, - pub: pub, + pub: aspects.EventStream, async: async, tknopts: tknopts, - disableVersioning: disableVersioning, + disableVersioning: aspects.DisableVersioning, + um: aspects.UserMapper, } } @@ -230,7 +237,7 @@ func (store OcisStore) CreateNodeForUpload(session *OcisSession, initAttrs node. 
unlock, err = store.tp.InitNewNode(ctx, n, uint64(session.Size())) if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Msg("failed to init new node") + appctx.GetLogger(ctx).Error().Str("path", n.InternalPath()).Err(err).Msg("failed to init new node") } session.info.MetaData["sizeDiff"] = strconv.FormatInt(session.Size(), 10) } @@ -270,7 +277,10 @@ func (store OcisStore) CreateNodeForUpload(session *OcisSession, initAttrs node. return nil, errors.Wrap(err, "Decomposedfs: could not write metadata") } - if err := session.Persist(ctx); err != nil { + err = store.um.RunInBaseScope(func() error { + return session.Persist(ctx) + }) + if err != nil { return nil, err } @@ -340,9 +350,8 @@ func (store OcisStore) updateExistingNode(ctx context.Context, session *OcisSess } } - versionPath := n.InternalPath() if !store.disableVersioning { - versionPath = session.store.lu.InternalPath(spaceID, n.ID+node.RevisionIDDelimiter+oldNodeMtime.UTC().Format(time.RFC3339Nano)) + versionPath := session.store.lu.InternalPath(spaceID, n.ID+node.RevisionIDDelimiter+oldNodeMtime.UTC().Format(time.RFC3339Nano)) // create version node if _, err := os.Create(versionPath); err != nil { @@ -359,14 +368,14 @@ func (store OcisStore) updateExistingNode(ctx context.Context, session *OcisSess }, f, true); err != nil { return unlock, err } + session.info.MetaData["versionsPath"] = versionPath + // keep mtime from previous version + if err := os.Chtimes(session.info.MetaData["versionsPath"], oldNodeMtime, oldNodeMtime); err != nil { + return unlock, errtypes.InternalError(fmt.Sprintf("failed to change mtime of version node: %s", err)) + } } - session.info.MetaData["sizeDiff"] = strconv.FormatInt((int64(fsize) - old.Blobsize), 10) - session.info.MetaData["versionsPath"] = versionPath - // keep mtime from previous version - if err := os.Chtimes(session.info.MetaData["versionsPath"], oldNodeMtime, oldNodeMtime); err != nil { - return unlock, errtypes.InternalError(fmt.Sprintf("failed to change mtime of 
version node: %s", err)) - } + session.info.MetaData["sizeDiff"] = strconv.FormatInt((int64(fsize) - old.Blobsize), 10) return unlock, nil } diff --git a/pkg/storage/utils/decomposedfs/upload/store_test.go b/pkg/storage/utils/decomposedfs/upload/store_test.go index 58dce7ce9d..9bfa3b475e 100644 --- a/pkg/storage/utils/decomposedfs/upload/store_test.go +++ b/pkg/storage/utils/decomposedfs/upload/store_test.go @@ -9,6 +9,7 @@ import ( providerv1beta1 "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" "github.com/cs3org/reva/v2/pkg/errtypes" "github.com/cs3org/reva/v2/pkg/storage/cache" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/aspects" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/lookup" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/metadata" "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/node" @@ -24,7 +25,11 @@ func TestInitNewNode(t *testing.T) { lookup := lookup.New(metadata.NewMessagePackBackend(root, cache.Config{}), &options.Options{Root: root}) tp := tree.New(lookup, nil, &options.Options{}, nil) - store := NewSessionStore(lookup, tp, root, nil, false, options.TokenOptions{}, false) + aspects := aspects.Aspects{ + Lookup: lookup, + Tree: tp, + } + store := NewSessionStore(nil, aspects, root, false, options.TokenOptions{}) rootNode := node.New("e48c4e7a-beac-4b82-b991-a5cff7b8c39c", "e48c4e7a-beac-4b82-b991-a5cff7b8c39c", "", "", 0, "", providerv1beta1.ResourceType_RESOURCE_TYPE_CONTAINER, &userv1beta1.UserId{}, lookup) rootNode.Exists = true diff --git a/pkg/storage/utils/decomposedfs/upload/upload.go b/pkg/storage/utils/decomposedfs/upload/upload.go index 9d93b112a9..d540dbf142 100644 --- a/pkg/storage/utils/decomposedfs/upload/upload.go +++ b/pkg/storage/utils/decomposedfs/upload/upload.go @@ -26,6 +26,7 @@ import ( "io" "io/fs" "os" + "strconv" "strings" "time" @@ -143,6 +144,22 @@ func (session *OcisSession) FinishUpload(ctx context.Context) error { prefixes.ChecksumPrefix + "adler32": 
adler32h.Sum(nil), } + // At this point we scope by the space to create the final file in the final location + if session.store.um != nil && session.info.Storage["SpaceGid"] != "" { + gid, err := strconv.Atoi(session.info.Storage["SpaceGid"]) + if err != nil { + return errors.Wrap(err, "failed to parse space gid") + } + + unscope, err := session.store.um.ScopeUserByIds(-1, gid) + if err != nil { + return errors.Wrap(err, "failed to scope user") + } + if unscope != nil { + defer func() { _ = unscope() }() + } + } + n, err := session.store.CreateNodeForUpload(session, attrs) if err != nil { session.store.Cleanup(ctx, session, true, false, false) @@ -198,7 +215,9 @@ func (session *OcisSession) Terminate(_ context.Context) error { func (session *OcisSession) DeclareLength(ctx context.Context, length int64) error { session.info.Size = length session.info.SizeIsDeferred = false - return session.Persist(session.Context(ctx)) + return session.store.um.RunInBaseScope(func() error { + return session.Persist(session.Context(ctx)) + }) } // ConcatUploads concatenates multiple uploads @@ -231,7 +250,8 @@ func (session *OcisSession) Finalize() (err error) { ctx, span := tracer.Start(session.Context(context.Background()), "Finalize") defer span.End() - revisionNode := &node.Node{SpaceID: session.SpaceID(), BlobID: session.ID(), Blobsize: session.Size()} + revisionNode := node.New(session.SpaceID(), session.NodeID(), "", "", session.Size(), session.ID(), + provider.ResourceType_RESOURCE_TYPE_FILE, session.SpaceOwner(), session.store.lu) // upload the data to the blobstore _, subspan := tracer.Start(ctx, "WriteBlob") @@ -270,9 +290,9 @@ func (session *OcisSession) Cleanup(revertNodeMetadata, cleanBin, cleanInfo bool if revertNodeMetadata { n, err := session.Node(ctx) if err != nil { - appctx.GetLogger(ctx).Error().Err(err).Str("spaceid", n.SpaceID).Str("nodeid", n.ID).Str("uploadid", session.ID()).Msg("reading node for session failed") + 
appctx.GetLogger(ctx).Error().Err(err).Str("sessionid", session.ID()).Msg("reading node for session failed") } - if session.NodeExists() { + if session.NodeExists() && session.info.MetaData["versionsPath"] != "" { p := session.info.MetaData["versionsPath"] if err := session.store.lu.CopyMetadata(ctx, p, n.InternalPath(), func(attributeName string, value []byte) (newValue []byte, copy bool) { return value, strings.HasPrefix(attributeName, prefixes.ChecksumPrefix) || diff --git a/pkg/storage/utils/decomposedfs/usermapper/mocks/Mapper.go b/pkg/storage/utils/decomposedfs/usermapper/mocks/Mapper.go new file mode 100644 index 0000000000..e08a86edbd --- /dev/null +++ b/pkg/storage/utils/decomposedfs/usermapper/mocks/Mapper.go @@ -0,0 +1,337 @@ +// Copyright 2018-2022 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +// Code generated by mockery v2.40.2. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// Mapper is an autogenerated mock type for the Mapper type +type Mapper struct { + mock.Mock +} + +type Mapper_Expecter struct { + mock *mock.Mock +} + +func (_m *Mapper) EXPECT() *Mapper_Expecter { + return &Mapper_Expecter{mock: &_m.Mock} +} + +// MapUser provides a mock function with given fields: username +func (_m *Mapper) MapUser(username string) (int, int, error) { + ret := _m.Called(username) + + if len(ret) == 0 { + panic("no return value specified for MapUser") + } + + var r0 int + var r1 int + var r2 error + if rf, ok := ret.Get(0).(func(string) (int, int, error)); ok { + return rf(username) + } + if rf, ok := ret.Get(0).(func(string) int); ok { + r0 = rf(username) + } else { + r0 = ret.Get(0).(int) + } + + if rf, ok := ret.Get(1).(func(string) int); ok { + r1 = rf(username) + } else { + r1 = ret.Get(1).(int) + } + + if rf, ok := ret.Get(2).(func(string) error); ok { + r2 = rf(username) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// Mapper_MapUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MapUser' +type Mapper_MapUser_Call struct { + *mock.Call +} + +// MapUser is a helper method to define mock.On call +// - username string +func (_e *Mapper_Expecter) MapUser(username interface{}) *Mapper_MapUser_Call { + return &Mapper_MapUser_Call{Call: _e.mock.On("MapUser", username)} +} + +func (_c *Mapper_MapUser_Call) Run(run func(username string)) *Mapper_MapUser_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(string)) + }) + return _c +} + +func (_c *Mapper_MapUser_Call) Return(_a0 int, _a1 int, _a2 error) *Mapper_MapUser_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *Mapper_MapUser_Call) RunAndReturn(run func(string) (int, int, error)) *Mapper_MapUser_Call { + _c.Call.Return(run) + return _c +} + +// RunInBaseScope provides a mock function with given fields: f 
+func (_m *Mapper) RunInBaseScope(f func() error) error { + ret := _m.Called(f) + + if len(ret) == 0 { + panic("no return value specified for RunInBaseScope") + } + + var r0 error + if rf, ok := ret.Get(0).(func(func() error) error); ok { + r0 = rf(f) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Mapper_RunInBaseScope_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RunInBaseScope' +type Mapper_RunInBaseScope_Call struct { + *mock.Call +} + +// RunInBaseScope is a helper method to define mock.On call +// - f func() error +func (_e *Mapper_Expecter) RunInBaseScope(f interface{}) *Mapper_RunInBaseScope_Call { + return &Mapper_RunInBaseScope_Call{Call: _e.mock.On("RunInBaseScope", f)} +} + +func (_c *Mapper_RunInBaseScope_Call) Run(run func(f func() error)) *Mapper_RunInBaseScope_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(func() error)) + }) + return _c +} + +func (_c *Mapper_RunInBaseScope_Call) Return(_a0 error) *Mapper_RunInBaseScope_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Mapper_RunInBaseScope_Call) RunAndReturn(run func(func() error) error) *Mapper_RunInBaseScope_Call { + _c.Call.Return(run) + return _c +} + +// ScopeBase provides a mock function with given fields: +func (_m *Mapper) ScopeBase() (func() error, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ScopeBase") + } + + var r0 func() error + var r1 error + if rf, ok := ret.Get(0).(func() (func() error, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() func() error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(func() error) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Mapper_ScopeBase_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ScopeBase' +type Mapper_ScopeBase_Call struct { + *mock.Call +} + +// 
ScopeBase is a helper method to define mock.On call +func (_e *Mapper_Expecter) ScopeBase() *Mapper_ScopeBase_Call { + return &Mapper_ScopeBase_Call{Call: _e.mock.On("ScopeBase")} +} + +func (_c *Mapper_ScopeBase_Call) Run(run func()) *Mapper_ScopeBase_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Mapper_ScopeBase_Call) Return(_a0 func() error, _a1 error) *Mapper_ScopeBase_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Mapper_ScopeBase_Call) RunAndReturn(run func() (func() error, error)) *Mapper_ScopeBase_Call { + _c.Call.Return(run) + return _c +} + +// ScopeUser provides a mock function with given fields: ctx +func (_m *Mapper) ScopeUser(ctx context.Context) (func() error, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ScopeUser") + } + + var r0 func() error + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (func() error, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) func() error); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(func() error) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Mapper_ScopeUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ScopeUser' +type Mapper_ScopeUser_Call struct { + *mock.Call +} + +// ScopeUser is a helper method to define mock.On call +// - ctx context.Context +func (_e *Mapper_Expecter) ScopeUser(ctx interface{}) *Mapper_ScopeUser_Call { + return &Mapper_ScopeUser_Call{Call: _e.mock.On("ScopeUser", ctx)} +} + +func (_c *Mapper_ScopeUser_Call) Run(run func(ctx context.Context)) *Mapper_ScopeUser_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *Mapper_ScopeUser_Call) Return(_a0 func() error, _a1 error) *Mapper_ScopeUser_Call { + 
_c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Mapper_ScopeUser_Call) RunAndReturn(run func(context.Context) (func() error, error)) *Mapper_ScopeUser_Call { + _c.Call.Return(run) + return _c +} + +// ScopeUserByIds provides a mock function with given fields: uid, gid +func (_m *Mapper) ScopeUserByIds(uid int, gid int) (func() error, error) { + ret := _m.Called(uid, gid) + + if len(ret) == 0 { + panic("no return value specified for ScopeUserByIds") + } + + var r0 func() error + var r1 error + if rf, ok := ret.Get(0).(func(int, int) (func() error, error)); ok { + return rf(uid, gid) + } + if rf, ok := ret.Get(0).(func(int, int) func() error); ok { + r0 = rf(uid, gid) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(func() error) + } + } + + if rf, ok := ret.Get(1).(func(int, int) error); ok { + r1 = rf(uid, gid) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Mapper_ScopeUserByIds_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ScopeUserByIds' +type Mapper_ScopeUserByIds_Call struct { + *mock.Call +} + +// ScopeUserByIds is a helper method to define mock.On call +// - uid int +// - gid int +func (_e *Mapper_Expecter) ScopeUserByIds(uid interface{}, gid interface{}) *Mapper_ScopeUserByIds_Call { + return &Mapper_ScopeUserByIds_Call{Call: _e.mock.On("ScopeUserByIds", uid, gid)} +} + +func (_c *Mapper_ScopeUserByIds_Call) Run(run func(uid int, gid int)) *Mapper_ScopeUserByIds_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(int), args[1].(int)) + }) + return _c +} + +func (_c *Mapper_ScopeUserByIds_Call) Return(_a0 func() error, _a1 error) *Mapper_ScopeUserByIds_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *Mapper_ScopeUserByIds_Call) RunAndReturn(run func(int, int) (func() error, error)) *Mapper_ScopeUserByIds_Call { + _c.Call.Return(run) + return _c +} + +// NewMapper creates a new instance of Mapper. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMapper(t interface { + mock.TestingT + Cleanup(func()) +}) *Mapper { + mock := &Mapper{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/storage/utils/decomposedfs/usermapper/mocks/UserMapper.go b/pkg/storage/utils/decomposedfs/usermapper/mocks/UserMapper.go new file mode 100644 index 0000000000..8cc96b5b56 --- /dev/null +++ b/pkg/storage/utils/decomposedfs/usermapper/mocks/UserMapper.go @@ -0,0 +1,274 @@ +// Copyright 2018-2022 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +// Code generated by mockery v2.40.2. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// UserMapper is an autogenerated mock type for the UserMapper type +type UserMapper struct { + mock.Mock +} + +type UserMapper_Expecter struct { + mock *mock.Mock +} + +func (_m *UserMapper) EXPECT() *UserMapper_Expecter { + return &UserMapper_Expecter{mock: &_m.Mock} +} + +// RunInBaseScope provides a mock function with given fields: f +func (_m *UserMapper) RunInBaseScope(f func() error) error { + ret := _m.Called(f) + + if len(ret) == 0 { + panic("no return value specified for RunInBaseScope") + } + + var r0 error + if rf, ok := ret.Get(0).(func(func() error) error); ok { + r0 = rf(f) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UserMapper_RunInBaseScope_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RunInBaseScope' +type UserMapper_RunInBaseScope_Call struct { + *mock.Call +} + +// RunInBaseScope is a helper method to define mock.On call +// - f func() error +func (_e *UserMapper_Expecter) RunInBaseScope(f interface{}) *UserMapper_RunInBaseScope_Call { + return &UserMapper_RunInBaseScope_Call{Call: _e.mock.On("RunInBaseScope", f)} +} + +func (_c *UserMapper_RunInBaseScope_Call) Run(run func(f func() error)) *UserMapper_RunInBaseScope_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(func() error)) + }) + return _c +} + +func (_c *UserMapper_RunInBaseScope_Call) Return(_a0 error) *UserMapper_RunInBaseScope_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *UserMapper_RunInBaseScope_Call) RunAndReturn(run func(func() error) error) *UserMapper_RunInBaseScope_Call { + _c.Call.Return(run) + return _c +} + +// ScopeBase provides a mock function with given fields: +func (_m *UserMapper) ScopeBase() (func() error, error) { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ScopeBase") + } + + var r0 func() error + var r1 error + if rf, ok := ret.Get(0).(func() 
(func() error, error)); ok { + return rf() + } + if rf, ok := ret.Get(0).(func() func() error); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(func() error) + } + } + + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UserMapper_ScopeBase_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ScopeBase' +type UserMapper_ScopeBase_Call struct { + *mock.Call +} + +// ScopeBase is a helper method to define mock.On call +func (_e *UserMapper_Expecter) ScopeBase() *UserMapper_ScopeBase_Call { + return &UserMapper_ScopeBase_Call{Call: _e.mock.On("ScopeBase")} +} + +func (_c *UserMapper_ScopeBase_Call) Run(run func()) *UserMapper_ScopeBase_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *UserMapper_ScopeBase_Call) Return(_a0 func() error, _a1 error) *UserMapper_ScopeBase_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *UserMapper_ScopeBase_Call) RunAndReturn(run func() (func() error, error)) *UserMapper_ScopeBase_Call { + _c.Call.Return(run) + return _c +} + +// ScopeUser provides a mock function with given fields: ctx +func (_m *UserMapper) ScopeUser(ctx context.Context) (func() error, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for ScopeUser") + } + + var r0 func() error + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (func() error, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) func() error); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(func() error) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UserMapper_ScopeUser_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ScopeUser' +type UserMapper_ScopeUser_Call struct { 
+ *mock.Call +} + +// ScopeUser is a helper method to define mock.On call +// - ctx context.Context +func (_e *UserMapper_Expecter) ScopeUser(ctx interface{}) *UserMapper_ScopeUser_Call { + return &UserMapper_ScopeUser_Call{Call: _e.mock.On("ScopeUser", ctx)} +} + +func (_c *UserMapper_ScopeUser_Call) Run(run func(ctx context.Context)) *UserMapper_ScopeUser_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *UserMapper_ScopeUser_Call) Return(_a0 func() error, _a1 error) *UserMapper_ScopeUser_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *UserMapper_ScopeUser_Call) RunAndReturn(run func(context.Context) (func() error, error)) *UserMapper_ScopeUser_Call { + _c.Call.Return(run) + return _c +} + +// ScopeUserByIds provides a mock function with given fields: uid, gid +func (_m *UserMapper) ScopeUserByIds(uid int, gid int) (func() error, error) { + ret := _m.Called(uid, gid) + + if len(ret) == 0 { + panic("no return value specified for ScopeUserByIds") + } + + var r0 func() error + var r1 error + if rf, ok := ret.Get(0).(func(int, int) (func() error, error)); ok { + return rf(uid, gid) + } + if rf, ok := ret.Get(0).(func(int, int) func() error); ok { + r0 = rf(uid, gid) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(func() error) + } + } + + if rf, ok := ret.Get(1).(func(int, int) error); ok { + r1 = rf(uid, gid) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UserMapper_ScopeUserByIds_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ScopeUserByIds' +type UserMapper_ScopeUserByIds_Call struct { + *mock.Call +} + +// ScopeUserByIds is a helper method to define mock.On call +// - uid int +// - gid int +func (_e *UserMapper_Expecter) ScopeUserByIds(uid interface{}, gid interface{}) *UserMapper_ScopeUserByIds_Call { + return &UserMapper_ScopeUserByIds_Call{Call: _e.mock.On("ScopeUserByIds", uid, gid)} +} + +func (_c 
*UserMapper_ScopeUserByIds_Call) Run(run func(uid int, gid int)) *UserMapper_ScopeUserByIds_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(int), args[1].(int)) + }) + return _c +} + +func (_c *UserMapper_ScopeUserByIds_Call) Return(_a0 func() error, _a1 error) *UserMapper_ScopeUserByIds_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *UserMapper_ScopeUserByIds_Call) RunAndReturn(run func(int, int) (func() error, error)) *UserMapper_ScopeUserByIds_Call { + _c.Call.Return(run) + return _c +} + +// NewUserMapper creates a new instance of UserMapper. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewUserMapper(t interface { + mock.TestingT + Cleanup(func()) +}) *UserMapper { + mock := &UserMapper{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/storage/utils/decomposedfs/usermapper/usermapper.go b/pkg/storage/utils/decomposedfs/usermapper/usermapper.go new file mode 100644 index 0000000000..c5a51376d9 --- /dev/null +++ b/pkg/storage/utils/decomposedfs/usermapper/usermapper.go @@ -0,0 +1,56 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//
+// In applying this license, CERN does not waive the privileges and immunities
+// granted to it by virtue of its status as an Intergovernmental Organization
+// or submit itself to any jurisdiction.
+
+package usermapper
+
+import (
+	"context"
+)
+
+// Mapper is the interface that wraps the basic mapping methods
+type Mapper interface {
+	RunInBaseScope(f func() error) error
+	ScopeBase() (func() error, error)
+	ScopeUser(ctx context.Context) (func() error, error)
+	ScopeUserByIds(uid, gid int) (func() error, error)
+}
+
+// UnscopeFunc is a function that unscopes the current user
+type UnscopeFunc func() error
+
+// NullMapper is a user mapper that does nothing
+type NullMapper struct{}
+
+// RunInBaseScope runs the given function in the scope of the base user
+func (nm *NullMapper) RunInBaseScope(f func() error) error {
+	return f()
+}
+
+// ScopeBase returns to the base uid and gid returning a function that can be used to restore the previous scope
+func (nm *NullMapper) ScopeBase() (func() error, error) {
+	return func() error { return nil }, nil
+}
+
+// ScopeUser scopes to the user in the given context, returning a function that can be used to restore the previous scope
+func (nm *NullMapper) ScopeUser(ctx context.Context) (func() error, error) {
+	return func() error { return nil }, nil
+}
+
+func (nm *NullMapper) ScopeUserByIds(uid, gid int) (func() error, error) {
+	return func() error { return nil }, nil
+}
diff --git a/pkg/storage/utils/decomposedfs/usermapper/usermapper_linux.go b/pkg/storage/utils/decomposedfs/usermapper/usermapper_linux.go
new file mode 100644
index 0000000000..f8863bdecc
--- /dev/null
+++ b/pkg/storage/utils/decomposedfs/usermapper/usermapper_linux.go
@@ -0,0 +1,131 @@
+// Copyright 2018-2021 CERN
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package usermapper + +import ( + "context" + "fmt" + "os/user" + "runtime" + "strconv" + + "golang.org/x/sys/unix" + + revactx "github.com/cs3org/reva/v2/pkg/ctx" +) + +// UnixMapper is a user mapper that maps users to unix uids and gids +type UnixMapper struct { + baseUid int + baseGid int +} + +// New returns a new user mapper +func NewUnixMapper() *UnixMapper { + baseUid, _ := unix.SetfsuidRetUid(-1) + baseGid, _ := unix.SetfsgidRetGid(-1) + + return &UnixMapper{ + baseUid: baseUid, + baseGid: baseGid, + } +} + +// RunInUserScope runs the given function in the scope of the base user +func (um *UnixMapper) RunInBaseScope(f func() error) error { + unscope, err := um.ScopeBase() + if err != nil { + return err + } + defer func() { _ = unscope() }() + + return f() +} + +// ScopeBase returns to the base uid and gid returning a function that can be used to restore the previous scope +func (um *UnixMapper) ScopeBase() (func() error, error) { + return um.ScopeUserByIds(-1, um.baseGid) +} + +// ScopeUser returns to the base uid and gid returning a function that can be used to restore the previous scope +func (um *UnixMapper) ScopeUser(ctx context.Context) (func() error, error) { + u := revactx.ContextMustGetUser(ctx) + + uid, gid, err := um.mapUser(u.Username) + if err != nil { + return nil, err + } + return um.ScopeUserByIds(uid, gid) +} + +// 
ScopeUserByIds scopes the current user to the given uid and gid returning a function that can be used to restore the previous scope +func (um *UnixMapper) ScopeUserByIds(uid, gid int) (func() error, error) { + runtime.LockOSThread() // Lock this Goroutine to the current OS thread + + var err error + var prevUid int + var prevGid int + if uid >= 0 { + prevUid, err = unix.SetfsuidRetUid(uid) + if err != nil { + return nil, err + } + if testUid, _ := unix.SetfsuidRetUid(-1); testUid != uid { + return nil, fmt.Errorf("failed to setfsuid to %d", uid) + } + } + if gid >= 0 { + prevGid, err = unix.SetfsgidRetGid(gid) + if err != nil { + return nil, err + } + if testGid, _ := unix.SetfsgidRetGid(-1); testGid != gid { + return nil, fmt.Errorf("failed to setfsgid to %d", gid) + } + } + + return func() error { + if uid >= 0 { + _ = unix.Setfsuid(prevUid) + } + if gid >= 0 { + _ = unix.Setfsgid(prevGid) + } + runtime.UnlockOSThread() + return nil + }, nil +} + +func (u *UnixMapper) mapUser(username string) (int, int, error) { + userDetails, err := user.Lookup(username) + if err != nil { + return 0, 0, err + } + + uid, err := strconv.Atoi(userDetails.Uid) + if err != nil { + return 0, 0, err + } + gid, err := strconv.Atoi(userDetails.Gid) + if err != nil { + return 0, 0, err + } + + return uid, gid, nil +} diff --git a/pkg/storage/utils/middleware/middleware.go b/pkg/storage/utils/middleware/middleware.go new file mode 100644 index 0000000000..7617be693f --- /dev/null +++ b/pkg/storage/utils/middleware/middleware.go @@ -0,0 +1,1090 @@ +// Copyright 2018-2024 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package middleware + +import ( + "context" + "io" + "net/url" + + tusd "github.com/tus/tusd/pkg/handler" + + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + "github.com/cs3org/reva/v2/pkg/storage" + "github.com/cs3org/reva/v2/pkg/storage/utils/decomposedfs/upload" + "github.com/cs3org/reva/v2/pkg/storagespace" +) + +// UnHook is a function that is called after the actual method is executed. +type UnHook func() error + +// Hook is a function that is called before the actual method is executed. +type Hook func(methodName string, ctx context.Context, spaceID string) (context.Context, UnHook, error) + +// FS is a storage.FS implementation that wraps another storage.FS and calls hooks before and after each method. +type FS struct { + next storage.FS + hooks []Hook +} + +func NewFS(next storage.FS, hooks ...Hook) *FS { + return &FS{ + next: next, + hooks: hooks, + } +} + +// ListUploadSessions returns the upload sessions matching the given filter +func (f *FS) ListUploadSessions(ctx context.Context, filter storage.UploadSessionFilter) ([]storage.UploadSession, error) { + return f.next.(storage.UploadSessionLister).ListUploadSessions(ctx, filter) +} + +// UseIn tells the tus upload middleware which extensions it supports. 
+func (f *FS) UseIn(composer *tusd.StoreComposer) { + f.next.(storage.ComposableFS).UseIn(composer) +} + +// NewUpload returns a new tus Upload instance +func (f *FS) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tusd.Upload, err error) { + return f.next.(tusd.DataStore).NewUpload(ctx, info) +} + +// NewUpload returns a new tus Upload instance +func (f *FS) GetUpload(ctx context.Context, id string) (upload tusd.Upload, err error) { + return f.next.(tusd.DataStore).GetUpload(ctx, id) +} + +// AsTerminatableUpload returns a TerminatableUpload +// To implement the termination extension as specified in https://tus.io/protocols/resumable-upload.html#termination +// the storage needs to implement AsTerminatableUpload +func (f *FS) AsTerminatableUpload(up tusd.Upload) tusd.TerminatableUpload { + return up.(*upload.OcisSession) +} + +// AsLengthDeclarableUpload returns a LengthDeclarableUpload +// To implement the creation-defer-length extension as specified in https://tus.io/protocols/resumable-upload.html#creation +// the storage needs to implement AsLengthDeclarableUpload +func (f *FS) AsLengthDeclarableUpload(up tusd.Upload) tusd.LengthDeclarableUpload { + return up.(*upload.OcisSession) +} + +// AsConcatableUpload returns a ConcatableUpload +// To implement the concatenation extension as specified in https://tus.io/protocols/resumable-upload.html#concatenation +// the storage needs to implement AsConcatableUpload +func (f *FS) AsConcatableUpload(up tusd.Upload) tusd.ConcatableUpload { + return up.(*upload.OcisSession) +} + +func (f *FS) GetHome(ctx context.Context) (string, error) { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("GetHome", ctx, "") + if err != nil { + return "", err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0, res1 := f.next.GetHome(ctx) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return "", err + } + } + 
+ return res0, res1 +} + +func (f *FS) CreateHome(ctx context.Context) error { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("CreateHome", ctx, "") + if err != nil { + return err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0 := f.next.CreateHome(ctx) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return err + } + } + + return res0 +} + +func (f *FS) CreateDir(ctx context.Context, ref *provider.Reference) error { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("CreateDir", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0 := f.next.CreateDir(ctx, ref) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return err + } + } + + return res0 +} + +func (f *FS) TouchFile(ctx context.Context, ref *provider.Reference, markprocessing bool, mtime string) error { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("TouchFile", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0 := f.next.TouchFile(ctx, ref, markprocessing, mtime) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return err + } + } + + return res0 +} + +func (f *FS) Delete(ctx context.Context, ref *provider.Reference) error { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("Delete", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0 := f.next.Delete(ctx, ref) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return err + } + } 
+ + return res0 +} + +func (f *FS) Move(ctx context.Context, oldRef, newRef *provider.Reference) error { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("Move", ctx, oldRef.GetResourceId().GetSpaceId()) + if err != nil { + return err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0 := f.next.Move(ctx, oldRef, newRef) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return err + } + } + + return res0 +} + +func (f *FS) GetMD(ctx context.Context, ref *provider.Reference, mdKeys, fieldMask []string) (*provider.ResourceInfo, error) { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("GetMD", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return nil, err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0, res1 := f.next.GetMD(ctx, ref, mdKeys, fieldMask) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return nil, err + } + } + + return res0, res1 +} + +func (f *FS) ListFolder(ctx context.Context, ref *provider.Reference, mdKeys, fieldMask []string) ([]*provider.ResourceInfo, error) { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("ListFolder", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return nil, err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0, res1 := f.next.ListFolder(ctx, ref, mdKeys, fieldMask) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return nil, err + } + } + + return res0, res1 +} + +func (f *FS) InitiateUpload(ctx context.Context, ref *provider.Reference, uploadLength int64, metadata map[string]string) (map[string]string, error) { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = 
hook("InitiateUpload", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return nil, err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0, res1 := f.next.InitiateUpload(ctx, ref, uploadLength, metadata) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return nil, err + } + } + + return res0, res1 +} + +func (f *FS) Upload(ctx context.Context, req storage.UploadRequest, uploadFunc storage.UploadFinishedFunc) (provider.ResourceInfo, error) { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("Upload", ctx, req.Ref.GetResourceId().GetSpaceId()) + if err != nil { + return provider.ResourceInfo{}, err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0, res1 := f.next.Upload(ctx, req, uploadFunc) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return provider.ResourceInfo{}, err + } + } + + return res0, res1 +} + +func (f *FS) Download(ctx context.Context, ref *provider.Reference) (io.ReadCloser, error) { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("Download", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return nil, err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0, res1 := f.next.Download(ctx, ref) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return nil, err + } + } + + return res0, res1 +} + +func (f *FS) ListRevisions(ctx context.Context, ref *provider.Reference) ([]*provider.FileVersion, error) { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("ListRevisions", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return nil, err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0, res1 := f.next.ListRevisions(ctx, ref) + + for _, unhook := 
range unhooks { + if err := unhook(); err != nil { + return nil, err + } + } + + return res0, res1 +} + +func (f *FS) DownloadRevision(ctx context.Context, ref *provider.Reference, key string) (io.ReadCloser, error) { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("DownloadRevision", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return nil, err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0, res1 := f.next.DownloadRevision(ctx, ref, key) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return nil, err + } + } + + return res0, res1 +} + +func (f *FS) RestoreRevision(ctx context.Context, ref *provider.Reference, key string) error { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("RestoreRevision", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0 := f.next.RestoreRevision(ctx, ref, key) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return err + } + } + + return res0 +} + +func (f *FS) ListRecycle(ctx context.Context, ref *provider.Reference, key, relativePath string) ([]*provider.RecycleItem, error) { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("ListRecycle", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return nil, err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0, res1 := f.next.ListRecycle(ctx, ref, key, relativePath) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return nil, err + } + } + + return res0, res1 +} + +func (f *FS) RestoreRecycleItem(ctx context.Context, ref *provider.Reference, key, relativePath string, restoreRef *provider.Reference) error { + var ( + err error + unhook UnHook 
+ unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("RestoreRecycleItem", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0 := f.next.RestoreRecycleItem(ctx, ref, key, relativePath, restoreRef) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return err + } + } + + return res0 +} + +func (f *FS) PurgeRecycleItem(ctx context.Context, ref *provider.Reference, key, relativePath string) error { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("PurgeRecycleItem", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0 := f.next.PurgeRecycleItem(ctx, ref, key, relativePath) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return err + } + } + + return res0 +} + +func (f *FS) EmptyRecycle(ctx context.Context, ref *provider.Reference) error { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("EmptyRecycle", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0 := f.next.EmptyRecycle(ctx, ref) + for _, unhook := range unhooks { + _ = unhook() + } + return res0 +} + +func (f *FS) GetPathByID(ctx context.Context, id *provider.ResourceId) (string, error) { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("GetPathByID", ctx, id.GetSpaceId()) + if err != nil { + return "", err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0, res1 := f.next.GetPathByID(ctx, id) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return "", err + } + } + + return res0, res1 +} + +func (f 
*FS) AddGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("AddGrant", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0 := f.next.AddGrant(ctx, ref, g) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return err + } + } + + return res0 +} + +func (f *FS) DenyGrant(ctx context.Context, ref *provider.Reference, g *provider.Grantee) error { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("DenyGrant", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0 := f.next.DenyGrant(ctx, ref, g) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return err + } + } + + return res0 +} + +func (f *FS) RemoveGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("RemoveGrant", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0 := f.next.RemoveGrant(ctx, ref, g) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return err + } + } + + return res0 +} + +func (f *FS) UpdateGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("UpdateGrant", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0 := f.next.UpdateGrant(ctx, ref, g) + + for _, unhook 
:= range unhooks { + if err := unhook(); err != nil { + return err + } + } + + return res0 +} + +func (f *FS) ListGrants(ctx context.Context, ref *provider.Reference) ([]*provider.Grant, error) { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("ListGrants", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return nil, err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0, res1 := f.next.ListGrants(ctx, ref) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return nil, err + } + } + + return res0, res1 +} + +func (f *FS) GetQuota(ctx context.Context, ref *provider.Reference) (uint64, uint64, uint64, error) { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("GetQuota", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return 0, 0, 0, err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0, res1, res2, res3 := f.next.GetQuota(ctx, ref) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return 0, 0, 0, err + } + } + + return res0, res1, res2, res3 +} + +func (f *FS) CreateReference(ctx context.Context, path string, targetURI *url.URL) error { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("CreateReference", ctx, "") + if err != nil { + return err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0 := f.next.CreateReference(ctx, path, targetURI) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return err + } + } + + return res0 +} + +func (f *FS) Shutdown(ctx context.Context) error { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("Shutdown", ctx, "") + if err != nil { + return err + } + if unhook != nil { + unhooks = 
append(unhooks, unhook) + } + } + + res0 := f.next.Shutdown(ctx) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return err + } + } + + return res0 +} + +func (f *FS) SetArbitraryMetadata(ctx context.Context, ref *provider.Reference, md *provider.ArbitraryMetadata) error { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("SetArbitraryMetadata", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0 := f.next.SetArbitraryMetadata(ctx, ref, md) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return err + } + } + + return res0 +} + +func (f *FS) UnsetArbitraryMetadata(ctx context.Context, ref *provider.Reference, keys []string) error { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("UnsetArbitraryMetadata", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0 := f.next.UnsetArbitraryMetadata(ctx, ref, keys) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return err + } + } + + return res0 +} + +func (f *FS) SetLock(ctx context.Context, ref *provider.Reference, lock *provider.Lock) error { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("SetLock", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0 := f.next.SetLock(ctx, ref, lock) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return err + } + } + + return res0 +} + +func (f *FS) GetLock(ctx context.Context, ref *provider.Reference) (*provider.Lock, error) { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook 
:= range f.hooks { + ctx, unhook, err = hook("GetLock", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return nil, err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0, res1 := f.next.GetLock(ctx, ref) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return nil, err + } + } + + return res0, res1 +} + +func (f *FS) RefreshLock(ctx context.Context, ref *provider.Reference, lock *provider.Lock, existingLockID string) error { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("RefreshLock", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0 := f.next.RefreshLock(ctx, ref, lock, existingLockID) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return err + } + } + + return res0 +} + +func (f *FS) Unlock(ctx context.Context, ref *provider.Reference, lock *provider.Lock) error { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("Unlock", ctx, ref.GetResourceId().GetSpaceId()) + if err != nil { + return err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0 := f.next.Unlock(ctx, ref, lock) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return err + } + } + + return res0 +} + +func (f *FS) ListStorageSpaces(ctx context.Context, filter []*provider.ListStorageSpacesRequest_Filter, unrestricted bool) ([]*provider.StorageSpace, error) { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("ListStorageSpaces", ctx, "") + if err != nil { + return nil, err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0, res1 := f.next.ListStorageSpaces(ctx, filter, unrestricted) + + for _, unhook := range unhooks { + if err := 
unhook(); err != nil { + return nil, err + } + } + + return res0, res1 +} + +func (f *FS) CreateStorageSpace(ctx context.Context, req *provider.CreateStorageSpaceRequest) (*provider.CreateStorageSpaceResponse, error) { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("CreateStorageSpace", ctx, "") + if err != nil { + return nil, err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0, res1 := f.next.CreateStorageSpace(ctx, req) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return nil, err + } + } + + return res0, res1 +} + +func (f *FS) UpdateStorageSpace(ctx context.Context, req *provider.UpdateStorageSpaceRequest) (*provider.UpdateStorageSpaceResponse, error) { + var ( + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + id, err := storagespace.ParseID(req.StorageSpace.GetId().GetOpaqueId()) + if err != nil { + return nil, err + } + ctx, unhook, err = hook("UpdateStorageSpace", ctx, id.SpaceId) + if err != nil { + return nil, err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0, res1 := f.next.UpdateStorageSpace(ctx, req) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return nil, err + } + } + + return res0, res1 +} + +func (f *FS) DeleteStorageSpace(ctx context.Context, req *provider.DeleteStorageSpaceRequest) error { + var ( + err error + unhook UnHook + unhooks []UnHook + ) + for _, hook := range f.hooks { + ctx, unhook, err = hook("DeleteStorageSpace", ctx, req.GetId().GetOpaqueId()) + if err != nil { + return err + } + if unhook != nil { + unhooks = append(unhooks, unhook) + } + } + + res0 := f.next.DeleteStorageSpace(ctx, req) + + for _, unhook := range unhooks { + if err := unhook(); err != nil { + return err + } + } + + return res0 +} diff --git a/tests/acceptance/expected-failures-on-POSIX-storage.md b/tests/acceptance/expected-failures-on-POSIX-storage.md 
new file mode 100644 index 0000000000..77274c5231 --- /dev/null +++ b/tests/acceptance/expected-failures-on-POSIX-storage.md @@ -0,0 +1,433 @@ + ## Scenarios from OCIS API tests that are expected to fail with OCIS storage + + The expected failures in this file are from features in the owncloud/ocis repo. + +### File +Basic file management like up and download, move, copy, properties, quota, trash, versions and chunking. + +#### [invalid webdav responses for unauthorized requests.](https://github.com/owncloud/product/issues/273) +These tests succeed when running against ocis because there we handle the relevant authentication in the proxy. +- [coreApiTrashbin/trashbinFilesFolders.feature:278](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L278) +- [coreApiTrashbin/trashbinFilesFolders.feature:283](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L283) +- [coreApiTrashbin/trashbinFilesFolders.feature:297](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L297) +- [coreApiTrashbin/trashbinFilesFolders.feature:302](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L302) + +#### [Custom dav properties with namespaces are rendered incorrectly](https://github.com/owncloud/ocis/issues/2140) +_ocdav: double check the webdav property parsing when custom namespaces are used_ +- [coreApiWebdavProperties/setFileProperties.feature:36](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/setFileProperties.feature#L36) +- [coreApiWebdavProperties/setFileProperties.feature:37](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/setFileProperties.feature#L37) +- 
[coreApiWebdavProperties/setFileProperties.feature:42](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/setFileProperties.feature#L42) +- [coreApiWebdavProperties/setFileProperties.feature:79](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/setFileProperties.feature#L79) +- [coreApiWebdavProperties/setFileProperties.feature:78](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/setFileProperties.feature#L78) +- [coreApiWebdavProperties/setFileProperties.feature:84](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/setFileProperties.feature#L84) + +#### [Cannot set custom webDav properties](https://github.com/owncloud/product/issues/264) +- [coreApiWebdavProperties/getFileProperties.feature:348](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/getFileProperties.feature#L348) +- [coreApiWebdavProperties/getFileProperties.feature:349](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/getFileProperties.feature#L349) +- [coreApiWebdavProperties/getFileProperties.feature:354](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/getFileProperties.feature#L354) +- [coreApiWebdavProperties/getFileProperties.feature:384](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/getFileProperties.feature#L384) +- [coreApiWebdavProperties/getFileProperties.feature:385](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/getFileProperties.feature#L385) +- [coreApiWebdavProperties/getFileProperties.feature:390](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/getFileProperties.feature#L390) + +### Sync +Synchronization features like etag 
propagation, setting mtime and locking files + +#### [Uploading an old method chunked file with checksum should fail using new DAV path](https://github.com/owncloud/ocis/issues/2323) +- [coreApiMain/checksums.feature:268](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiMain/checksums.feature#L268) +- [coreApiMain/checksums.feature:273](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiMain/checksums.feature#L273) + +#### [Webdav LOCK operations](https://github.com/owncloud/ocis/issues/1284) +- [coreApiWebdavLocks2/independentLocks.feature:25](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocks2/independentLocks.feature#L25) +- [coreApiWebdavLocks2/independentLocks.feature:26](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocks2/independentLocks.feature#L26) +- [coreApiWebdavLocks2/independentLocks.feature:27](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocks2/independentLocks.feature#L27) +- [coreApiWebdavLocks2/independentLocks.feature:28](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocks2/independentLocks.feature#L28) +- [coreApiWebdavLocks2/independentLocks.feature:33](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocks2/independentLocks.feature#L33) +- [coreApiWebdavLocks2/independentLocks.feature:34](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocks2/independentLocks.feature#L34) +- [coreApiWebdavLocks2/independentLocks.feature:53](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocks2/independentLocks.feature#L53) +- [coreApiWebdavLocks2/independentLocks.feature:54](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocks2/independentLocks.feature#L54) +- 
[coreApiWebdavLocks2/independentLocks.feature:55](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocks2/independentLocks.feature#L55) +- [coreApiWebdavLocks2/independentLocks.feature:56](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocks2/independentLocks.feature#L56) +- [coreApiWebdavLocks2/independentLocks.feature:57](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocks2/independentLocks.feature#L57) +- [coreApiWebdavLocks2/independentLocks.feature:58](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocks2/independentLocks.feature#L58) +- [coreApiWebdavLocks2/independentLocks.feature:59](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocks2/independentLocks.feature#L59) +- [coreApiWebdavLocks2/independentLocks.feature:60](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocks2/independentLocks.feature#L60) +- [coreApiWebdavLocks2/independentLocks.feature:65](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocks2/independentLocks.feature#L65) +- [coreApiWebdavLocks2/independentLocks.feature:66](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocks2/independentLocks.feature#L66) +- [coreApiWebdavLocks2/independentLocks.feature:67](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocks2/independentLocks.feature#L67) +- [coreApiWebdavLocks2/independentLocks.feature:68](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocks2/independentLocks.feature#L68) +- [coreApiWebdavLocksUnlock/unlock.feature:23](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocksUnlock/unlock.feature#L23) +- 
[coreApiWebdavLocksUnlock/unlock.feature:24](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocksUnlock/unlock.feature#L24) +- [coreApiWebdavLocksUnlock/unlock.feature:29](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocksUnlock/unlock.feature#L29) +- [coreApiWebdavLocksUnlock/unlock.feature:43](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocksUnlock/unlock.feature#L43) +- [coreApiWebdavLocksUnlock/unlock.feature:44](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocksUnlock/unlock.feature#L44) +- [coreApiWebdavLocksUnlock/unlock.feature:67](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocksUnlock/unlock.feature#L67) +- [coreApiWebdavLocksUnlock/unlock.feature:68](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocksUnlock/unlock.feature#L68) +- [coreApiWebdavLocksUnlock/unlock.feature:69](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocksUnlock/unlock.feature#L69) +- [coreApiWebdavLocksUnlock/unlock.feature:66](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocksUnlock/unlock.feature#L66) +- [coreApiWebdavLocksUnlock/unlock.feature:74](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocksUnlock/unlock.feature#L74) +- [coreApiWebdavLocksUnlock/unlock.feature:75](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavLocksUnlock/unlock.feature#L75) + + +#### [oc:privatelink property not returned in webdav responses](https://github.com/owncloud/product/issues/262) +- [coreApiWebdavProperties/getFileProperties.feature:301](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/getFileProperties.feature#L301) +- 
[coreApiWebdavProperties/getFileProperties.feature:302](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/getFileProperties.feature#L302) +- [coreApiWebdavProperties/getFileProperties.feature:307](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/getFileProperties.feature#L307) + +### User Management +User and group management features + +### Other +API, search, favorites, config, capabilities, not existing endpoints, CORS and others + +#### [no robots.txt available](https://github.com/owncloud/ocis/issues/1314) +- [coreApiMain/main.feature:7](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiMain/main.feature#L7) Scenario: robots.txt file should be accessible + +#### [Ability to return error messages in Webdav response bodies](https://github.com/owncloud/ocis/issues/1293) +- [coreApiAuth/ocsDELETEAuth.feature:7](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/ocsDELETEAuth.feature#L7) Scenario: send DELETE requests to OCS endpoints as admin with wrong password +- [coreApiAuth/ocsGETAuth.feature:10](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/ocsGETAuth.feature#L10) Scenario: using OCS anonymously +- [coreApiAuth/ocsGETAuth.feature:44](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/ocsGETAuth.feature#L44) Scenario: using OCS with non-admin basic auth +- [coreApiAuth/ocsGETAuth.feature:75](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/ocsGETAuth.feature#L75) Scenario: using OCS as normal user with wrong password +- [coreApiAuth/ocsGETAuth.feature:106](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/ocsGETAuth.feature#L106) Scenario: using OCS with admin basic auth +- 
[coreApiAuth/ocsGETAuth.feature:123](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/ocsGETAuth.feature#L123) Scenario: using OCS as admin user with wrong password +- [coreApiAuth/ocsPOSTAuth.feature:10](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/ocsPOSTAuth.feature#L10) Scenario: send POST requests to OCS endpoints as normal user with wrong password +- [coreApiAuth/ocsPUTAuth.feature:7](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/ocsPUTAuth.feature#L7) Scenario: send PUT request to OCS endpoints as admin with wrong password + +#### [sending MKCOL requests to another or non-existing user's webDav endpoints as normal user should return 404](https://github.com/owncloud/ocis/issues/5049) +_ocdav: api compatibility, return correct status code_ +- [coreApiAuth/webDavMKCOLAuth.feature:42](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/webDavMKCOLAuth.feature#L42) Scenario: send MKCOL requests to another user's webDav endpoints as normal user +- [coreApiAuth/webDavMKCOLAuth.feature:53](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/webDavMKCOLAuth.feature#L53) Scenario: send MKCOL requests to another user's webDav endpoints as normal user using the spaces WebDAV API + +#### [trying to lock file of another user gives http 200](https://github.com/owncloud/ocis/issues/2176) +- [coreApiAuth/webDavLOCKAuth.feature:46](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/webDavLOCKAuth.feature#L46) Scenario: send LOCK requests to another user's webDav endpoints as normal user +- [coreApiAuth/webDavLOCKAuth.feature:58](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/webDavLOCKAuth.feature#L58) Scenario: send LOCK requests to another user's webDav endpoints as normal user using the spaces WebDAV API + +#### [send (MOVE, COPY) 
requests to another user's webDav endpoints as normal user gives 400 instead of 403](https://github.com/owncloud/ocis/issues/3882) +_ocdav: api compatibility, return correct status code_ +- [coreApiAuth/webDavMOVEAuth.feature:55](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/webDavMOVEAuth.feature#L55) Scenario: send MOVE requests to another user's webDav endpoints as normal user using the spaces WebDAV API +- [coreApiAuth/webDavCOPYAuth.feature:55](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/webDavCOPYAuth.feature#L55) + +#### [send POST requests to another user's webDav endpoints as normal user](https://github.com/owncloud/ocis/issues/1287) +_ocdav: api compatibility, return correct status code_ +- [coreApiAuth/webDavPOSTAuth.feature:46](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/webDavPOSTAuth.feature#L46) Scenario: send POST requests to another user's webDav endpoints as normal user +- [coreApiAuth/webDavPOSTAuth.feature:55](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/webDavPOSTAuth.feature#L55) Scenario: send POST requests to another user's webDav endpoints as normal user using the spaces WebDAV API + +#### [Using double slash in URL to access a folder gives 501 and other status codes](https://github.com/owncloud/ocis/issues/1667) +- [coreApiAuth/webDavSpecialURLs.feature:123](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/webDavSpecialURLs.feature#L123) +- [coreApiAuth/webDavSpecialURLs.feature:134](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/webDavSpecialURLs.feature#L134) +- [coreApiAuth/webDavSpecialURLs.feature:165](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/webDavSpecialURLs.feature#L165) +- 
[coreApiAuth/webDavSpecialURLs.feature:176](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/webDavSpecialURLs.feature#L176) + +#### [Difference in response content of status.php and default capabilities](https://github.com/owncloud/ocis/issues/1286) +- [coreApiCapabilities/capabilitiesWithNormalUser.feature:13](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiCapabilities/capabilitiesWithNormalUser.feature#L13) Scenario: getting default capabilities with normal user + +#### [spaces endpoint does not allow REPORT requests](https://github.com/owncloud/ocis/issues/4034) +- [coreApiWebdavOperations/search.feature:42](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L42) +- [coreApiWebdavOperations/search.feature:43](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L43) +- [coreApiWebdavOperations/search.feature:48](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L48) +- [coreApiWebdavOperations/search.feature:64](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L64) +- [coreApiWebdavOperations/search.feature:65](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L65) +- [coreApiWebdavOperations/search.feature:70](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L70) +- [coreApiWebdavOperations/search.feature:87](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L87) +- [coreApiWebdavOperations/search.feature:88](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L88) +- 
[coreApiWebdavOperations/search.feature:93](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L93) +- [coreApiWebdavOperations/search.feature:102](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L102) +- [coreApiWebdavOperations/search.feature:103](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L103) +- [coreApiWebdavOperations/search.feature:108](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L108) +- [coreApiWebdavOperations/search.feature:126](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L126) +- [coreApiWebdavOperations/search.feature:127](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L127) +- [coreApiWebdavOperations/search.feature:132](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L132) +- [coreApiWebdavOperations/search.feature:150](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L150) +- [coreApiWebdavOperations/search.feature:151](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L151) +- [coreApiWebdavOperations/search.feature:156](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L156) +- [coreApiWebdavOperations/search.feature:176](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L176) +- [coreApiWebdavOperations/search.feature:175](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L175) +- 
[coreApiWebdavOperations/search.feature:181](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L181) +- [coreApiWebdavOperations/search.feature:209](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L209) +- [coreApiWebdavOperations/search.feature:208](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L208) +- [coreApiWebdavOperations/search.feature:214](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L214) +- [coreApiWebdavOperations/search.feature:241](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L241) +- [coreApiWebdavOperations/search.feature:240](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L240) +- [coreApiWebdavOperations/search.feature:246](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L246) +- [coreApiWebdavOperations/search.feature:266](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L266) +- [coreApiWebdavOperations/search.feature:265](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L265) +- [coreApiWebdavOperations/search.feature:271](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/search.feature#L271) + +#### [Support for favorites](https://github.com/owncloud/ocis/issues/1228) +- [coreApiFavorites/favorites.feature:117](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiFavorites/favorites.feature#L117) +- 
[coreApiFavorites/favorites.feature:118](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiFavorites/favorites.feature#L118) +- [coreApiFavorites/favorites.feature:169](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiFavorites/favorites.feature#L169) +- [coreApiFavorites/favorites.feature:170](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiFavorites/favorites.feature#L170) +- [coreApiFavorites/favorites.feature:202](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiFavorites/favorites.feature#L202) +- [coreApiFavorites/favorites.feature:203](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiFavorites/favorites.feature#L203) +- [coreApiFavorites/favorites.feature:175](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiFavorites/favorites.feature#L175) +- [coreApiFavorites/favorites.feature:208](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiFavorites/favorites.feature#L208) +- [coreApiFavorites/favorites.feature:221](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiFavorites/favorites.feature#L221) +- [coreApiFavorites/favorites.feature:222](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiFavorites/favorites.feature#L222) +- [coreApiFavorites/favorites.feature:144](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiFavorites/favorites.feature#L144) +- [coreApiFavorites/favorites.feature:145](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiFavorites/favorites.feature#L145) + +#### [WWW-Authenticate header for unauthenticated requests is not clear](https://github.com/owncloud/ocis/issues/2285) +- 
[coreApiWebdavOperations/refuseAccess.feature:21](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/refuseAccess.feature#L21) +- [coreApiWebdavOperations/refuseAccess.feature:22](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/refuseAccess.feature#L22) +- [coreApiWebdavOperations/refuseAccess.feature:34](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/refuseAccess.feature#L34) +- [coreApiWebdavOperations/refuseAccess.feature:35](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/refuseAccess.feature#L35) +- [coreApiWebdavOperations/refuseAccess.feature:40](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/refuseAccess.feature#L40) + +#### [Request to edit non-existing user by authorized admin gets unauthorized in http response](https://github.com/owncloud/ocis/issues/38423) +- [coreApiAuth/ocsPUTAuth.feature:22](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/ocsPUTAuth.feature#L22) + + +### Won't fix +Not everything needs to be implemented for ocis. While the oc10 testsuite covers these things we are not looking at them right now. + +* _The `OC-LazyOps` header is [no longer supported by the client](https://github.com/owncloud/client/pull/8398), implementing this is not necessary for a first production release. 
We plan to have an upload state machine to visualize the state of a file, see https://github.com/owncloud/ocis/issues/214_ +* _Blacklisted ignored files are no longer required because ocis can handle `.htaccess` files without security implications introduced by serving user provided files with apache._ + +#### [Blacklist files extensions](https://github.com/owncloud/ocis/issues/2177) +- [coreApiWebdavProperties/copyFile.feature:117](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/copyFile.feature#L117) +- [coreApiWebdavProperties/copyFile.feature:118](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/copyFile.feature#L118) +- [coreApiWebdavProperties/copyFile.feature:123](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/copyFile.feature#L123) +- [coreApiWebdavProperties/createFileFolder.feature:106](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/createFileFolder.feature#L106) +- [coreApiWebdavProperties/createFileFolder.feature:107](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/createFileFolder.feature#L107) +- [coreApiWebdavProperties/createFileFolder.feature:112](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavProperties/createFileFolder.feature#L112) +- [coreApiWebdavUpload/uploadFile.feature:180](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUpload/uploadFile.feature#L180) +- [coreApiWebdavUpload/uploadFile.feature:181](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUpload/uploadFile.feature#L181) +- [coreApiWebdavUpload/uploadFile.feature:186](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUpload/uploadFile.feature#L186) +- 
[coreApiWebdavMove2/moveFile.feature:179](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L179) +- [coreApiWebdavMove2/moveFile.feature:217](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L217) +- [coreApiWebdavMove2/moveFile.feature:218](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L218) + +#### [cannot set blacklisted file names](https://github.com/owncloud/product/issues/260) +- [coreApiWebdavMove1/moveFolderToBlacklistedName.feature:20](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove1/moveFolderToBlacklistedName.feature#L20) +- [coreApiWebdavMove1/moveFolderToBlacklistedName.feature:21](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove1/moveFolderToBlacklistedName.feature#L21) +- [coreApiWebdavMove1/moveFolderToBlacklistedName.feature:26](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove1/moveFolderToBlacklistedName.feature#L26) +- [coreApiWebdavMove2/moveFileToBlacklistedName.feature:18](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove2/moveFileToBlacklistedName.feature#L18) +- [coreApiWebdavMove2/moveFileToBlacklistedName.feature:19](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove2/moveFileToBlacklistedName.feature#L19) + +### To triage +_The below features have been added after I last categorized them. AFAICT they are bugs. 
@jfd_ + +#### [PATCH request for TUS upload with wrong checksum gives incorrect response](https://github.com/owncloud/ocis/issues/1755) +- [coreApiWebdavUploadTUS/checksums.feature:86](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L86) +- [coreApiWebdavUploadTUS/checksums.feature:87](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L87) +- [coreApiWebdavUploadTUS/checksums.feature:88](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L88) +- [coreApiWebdavUploadTUS/checksums.feature:89](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L89) +- [coreApiWebdavUploadTUS/checksums.feature:94](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L94) +- [coreApiWebdavUploadTUS/checksums.feature:95](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L95) +- [coreApiWebdavUploadTUS/checksums.feature:175](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L175) +- [coreApiWebdavUploadTUS/checksums.feature:176](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L176) +- [coreApiWebdavUploadTUS/checksums.feature:181](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L181) +- [coreApiWebdavUploadTUS/checksums.feature:228](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L228) +- [coreApiWebdavUploadTUS/checksums.feature:229](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L229) +- 
[coreApiWebdavUploadTUS/checksums.feature:230](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L230) +- [coreApiWebdavUploadTUS/checksums.feature:231](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L231) +- [coreApiWebdavUploadTUS/checksums.feature:236](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L236) +- [coreApiWebdavUploadTUS/checksums.feature:237](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L237) +- [coreApiWebdavUploadTUS/checksums.feature:284](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L284) +- [coreApiWebdavUploadTUS/checksums.feature:285](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L285) +- [coreApiWebdavUploadTUS/checksums.feature:286](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L286) +- [coreApiWebdavUploadTUS/checksums.feature:287](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L287) +- [coreApiWebdavUploadTUS/checksums.feature:292](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L292) +- [coreApiWebdavUploadTUS/checksums.feature:293](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/checksums.feature#L293) +- [coreApiWebdavUploadTUS/optionsRequest.feature:10](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/optionsRequest.feature#L10) +- 
[coreApiWebdavUploadTUS/optionsRequest.feature:25](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/optionsRequest.feature#L25) + +#### [TUS OPTIONS requests do not reply with TUS headers when invalid password](https://github.com/owncloud/ocis/issues/1012) +- [coreApiWebdavUploadTUS/optionsRequest.feature:40](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/optionsRequest.feature#L40) +- [coreApiWebdavUploadTUS/optionsRequest.feature:55](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/optionsRequest.feature#L55) + +### [Content-type is not multipart/byteranges when downloading file with Range Header](https://github.com/owncloud/ocis/issues/2677) +- [coreApiWebdavOperations/downloadFile.feature:183](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/downloadFile.feature#L183) +- [coreApiWebdavOperations/downloadFile.feature:184](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/downloadFile.feature#L184) +- [coreApiWebdavOperations/downloadFile.feature:189](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/downloadFile.feature#L189) + +### [send PUT requests to another user's webDav endpoints as normal user](https://github.com/owncloud/ocis/issues/2893) +- [coreApiAuth/webDavPUTAuth.feature:46](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/webDavPUTAuth.feature#L46) +- [coreApiAuth/webDavPUTAuth.feature:58](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiAuth/webDavPUTAuth.feature#L58) + +#### [Renaming resource to banned name is allowed in spaces webdav](https://github.com/owncloud/ocis/issues/3099) +- 
[coreApiWebdavMove1/moveFolder.feature:44](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove1/moveFolder.feature#L44) +- [coreApiWebdavMove1/moveFolder.feature:62](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove1/moveFolder.feature#L62) +- [coreApiWebdavMove1/moveFolder.feature:80](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove1/moveFolder.feature#L80) +- [coreApiWebdavMove2/moveFile.feature:223](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L223) +- [coreApiWebdavMove2/moveFileToBlacklistedName.feature:24](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove2/moveFileToBlacklistedName.feature#L24) + +#### [REPORT method on spaces returns an incorrect d:href response](https://github.com/owncloud/ocis/issues/3111) +- [coreApiFavorites/favorites.feature:123](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiFavorites/favorites.feature#L123) +- [coreApiFavorites/favorites.feature:227](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiFavorites/favorites.feature#L227) +- [coreApiFavorites/favorites.feature:150](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiFavorites/favorites.feature#L150) + +#### [Cannot disable the dav propfind depth infinity for resources](https://github.com/owncloud/ocis/issues/3720) +- [coreApiWebdavOperations/propfind.feature:39](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/propfind.feature#L39) + +#### [HTTP status code differ while deleting file of another user's trash bin](https://github.com/owncloud/ocis/issues/3544) + +- [coreApiTrashbin/trashbinDelete.feature:105](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L105) + +#### 
[Default capabilities for normal user and admin user not same as in oC-core](https://github.com/owncloud/ocis/issues/1285) +- [coreApiCapabilities/capabilities.feature:10](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiCapabilities/capabilities.feature#L10) +- [coreApiCapabilities/capabilities.feature:135](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiCapabilities/capabilities.feature#L135) +- [coreApiCapabilities/capabilities.feature:174](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiCapabilities/capabilities.feature#L174) + +### [MOVE a file into same folder with same name returns 404 instead of 403](https://github.com/owncloud/ocis/issues/1976) + +- [coreApiWebdavMove2/moveFile.feature:120](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L120) +- [coreApiWebdavMove2/moveFile.feature:121](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L121) +- [coreApiWebdavMove2/moveFile.feature:126](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L126) + +### posixfs doesn't do versions at that point +- [coreApiWebdavUploadTUS/uploadFile.feature:146](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFile.feature#L146) +- [coreApiWebdavUploadTUS/uploadFile.feature:147](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFile.feature#L147) +- [coreApiWebdavUploadTUS/uploadFile.feature:152](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFile.feature#L152) +- [coreApiWebdavUploadTUS/uploadFile.feature:173](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFile.feature#L173) +- 
[coreApiWebdavUploadTUS/uploadFile.feature:174](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFile.feature#L174) +- [coreApiWebdavUploadTUS/uploadFile.feature:179](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavUploadTUS/uploadFile.feature#L179) +- [coreApiVersions/fileVersions.feature:26](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiVersions/fileVersions.feature#L26) +- [coreApiVersions/fileVersions.feature:34](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiVersions/fileVersions.feature#L34) +- [coreApiVersions/fileVersions.feature:43](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiVersions/fileVersions.feature#L43) +- [coreApiVersions/fileVersions.feature:53](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiVersions/fileVersions.feature#L53) +- [coreApiVersions/fileVersions.feature:62](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiVersions/fileVersions.feature#L62) +- [coreApiVersions/fileVersions.feature:71](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiVersions/fileVersions.feature#L71) +- [coreApiVersions/fileVersions.feature:84](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiVersions/fileVersions.feature#L84) +- [coreApiVersions/fileVersions.feature:120](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiVersions/fileVersions.feature#L120) +- [coreApiVersions/fileVersions.feature:129](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiVersions/fileVersions.feature#L129) +- [coreApiVersions/fileVersions.feature:148](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiVersions/fileVersions.feature#L148) +- 
[coreApiVersions/fileVersions.feature:158](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiVersions/fileVersions.feature#L158) +- [coreApiVersions/fileVersions.feature:176](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiVersions/fileVersions.feature#L176) +- [coreApiVersions/fileVersions.feature:235](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiVersions/fileVersions.feature#L235) +- [coreApiVersions/fileVersions.feature:251](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiVersions/fileVersions.feature#L251) +- [coreApiVersions/fileVersions.feature:258](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiVersions/fileVersions.feature#L258) +- [coreApiVersions/fileVersions.feature:266](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiVersions/fileVersions.feature#L266) +- [coreApiVersions/fileVersions.feature:474](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiVersions/fileVersions.feature#L474) +- [coreApiVersions/fileVersions.feature:488](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiVersions/fileVersions.feature#L488) +- [coreApiWebdavMove2/moveFile.feature:45](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L45) +- [coreApiWebdavMove2/moveFile.feature:46](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L46) +- [coreApiWebdavMove2/moveFile.feature:51](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L51) + +### posixfs doesn't do trashbin at that point +- [coreApiTrashbin/trashbinDelete.feature:53](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L53) +- 
[coreApiTrashbin/trashbinDelete.feature:58](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L58) +- [coreApiTrashbin/trashbinDelete.feature:80](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L80) +- [coreApiTrashbin/trashbinDelete.feature:85](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L85) +- [coreApiTrashbin/trashbinDelete.feature:104](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L104) +- [coreApiTrashbin/trashbinDelete.feature:123](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L123) +- [coreApiTrashbin/trashbinDelete.feature:128](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L128) +- [coreApiTrashbin/trashbinDelete.feature:146](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L146) +- [coreApiTrashbin/trashbinDelete.feature:151](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L151) +- [coreApiTrashbin/trashbinDelete.feature:171](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L171) +- [coreApiTrashbin/trashbinDelete.feature:176](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L176) +- [coreApiTrashbin/trashbinDelete.feature:196](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L196) +- [coreApiTrashbin/trashbinDelete.feature:201](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L201) +- 
[coreApiTrashbin/trashbinDelete.feature:233](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L233) +- [coreApiTrashbin/trashbinDelete.feature:238](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L238) +- [coreApiTrashbin/trashbinDelete.feature:270](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L270) +- [coreApiTrashbin/trashbinDelete.feature:275](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L275) +- [coreApiTrashbin/trashbinDelete.feature:319](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L319) +- [coreApiTrashbin/trashbinDelete.feature:324](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinDelete.feature#L324) +- [coreApiTrashbin/trashbinFilesFolders.feature:20](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L20) +- [coreApiTrashbin/trashbinFilesFolders.feature:25](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L25) +- [coreApiTrashbin/trashbinFilesFolders.feature:36](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L36) +- [coreApiTrashbin/trashbinFilesFolders.feature:41](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L41) +- [coreApiTrashbin/trashbinFilesFolders.feature:55](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L55) +- 
[coreApiTrashbin/trashbinFilesFolders.feature:60](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L60) +- [coreApiTrashbin/trashbinFilesFolders.feature:141](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L141) +- [coreApiTrashbin/trashbinFilesFolders.feature:146](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L146) +- [coreApiTrashbin/trashbinFilesFolders.feature:164](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L164) +- [coreApiTrashbin/trashbinFilesFolders.feature:169](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L169) +- [coreApiTrashbin/trashbinFilesFolders.feature:315](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L315) +- [coreApiTrashbin/trashbinFilesFolders.feature:316](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L316) +- [coreApiTrashbin/trashbinFilesFolders.feature:317](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L317) +- [coreApiTrashbin/trashbinFilesFolders.feature:322](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L322) +- [coreApiTrashbin/trashbinFilesFolders.feature:323](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L323) +- [coreApiTrashbin/trashbinFilesFolders.feature:324](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L324) +- 
[coreApiTrashbin/trashbinFilesFolders.feature:336](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L336) +- [coreApiTrashbin/trashbinFilesFolders.feature:341](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L341) +- [coreApiTrashbin/trashbinFilesFolders.feature:356](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L356) +- [coreApiTrashbin/trashbinFilesFolders.feature:361](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L361) +- [coreApiTrashbin/trashbinFilesFolders.feature:373](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L373) +- [coreApiTrashbin/trashbinFilesFolders.feature:378](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L378) +- [coreApiTrashbin/trashbinFilesFolders.feature:434](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L434) +- [coreApiTrashbin/trashbinFilesFolders.feature:439](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L439) +- [coreApiTrashbin/trashbinFilesFolders.feature:495](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L495) +- [coreApiTrashbin/trashbinFilesFolders.feature:500](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbin/trashbinFilesFolders.feature#L500) +- [coreApiTrashbinRestore/trashbinRestore.feature:34](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L34) +- 
[coreApiTrashbinRestore/trashbinRestore.feature:35](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L35) +- [coreApiTrashbinRestore/trashbinRestore.feature:50](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L50) +- [coreApiTrashbinRestore/trashbinRestore.feature:51](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L51) +- [coreApiTrashbinRestore/trashbinRestore.feature:68](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L68) +- [coreApiTrashbinRestore/trashbinRestore.feature:69](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L69) +- [coreApiTrashbinRestore/trashbinRestore.feature:88](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L88) +- [coreApiTrashbinRestore/trashbinRestore.feature:89](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L89) +- [coreApiTrashbinRestore/trashbinRestore.feature:90](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L90) +- [coreApiTrashbinRestore/trashbinRestore.feature:91](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L91) +- [coreApiTrashbinRestore/trashbinRestore.feature:92](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L92) +- [coreApiTrashbinRestore/trashbinRestore.feature:93](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L93) +- 
[coreApiTrashbinRestore/trashbinRestore.feature:108](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L108) +- [coreApiTrashbinRestore/trashbinRestore.feature:109](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L109) +- [coreApiTrashbinRestore/trashbinRestore.feature:110](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L110) +- [coreApiTrashbinRestore/trashbinRestore.feature:111](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L111) +- [coreApiTrashbinRestore/trashbinRestore.feature:130](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L130) +- [coreApiTrashbinRestore/trashbinRestore.feature:131](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L131) +- [coreApiTrashbinRestore/trashbinRestore.feature:145](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L145) +- [coreApiTrashbinRestore/trashbinRestore.feature:146](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L146) +- [coreApiTrashbinRestore/trashbinRestore.feature:160](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L160) +- [coreApiTrashbinRestore/trashbinRestore.feature:161](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L161) +- [coreApiTrashbinRestore/trashbinRestore.feature:175](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L175) +- 
[coreApiTrashbinRestore/trashbinRestore.feature:176](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L176) +- [coreApiTrashbinRestore/trashbinRestore.feature:190](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L190) +- [coreApiTrashbinRestore/trashbinRestore.feature:191](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L191) +- [coreApiTrashbinRestore/trashbinRestore.feature:192](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L192) +- [coreApiTrashbinRestore/trashbinRestore.feature:193](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L193) +- [coreApiTrashbinRestore/trashbinRestore.feature:194](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L194) +- [coreApiTrashbinRestore/trashbinRestore.feature:195](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L195) +- [coreApiTrashbinRestore/trashbinRestore.feature:212](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L212) +- [coreApiTrashbinRestore/trashbinRestore.feature:213](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L213) +- [coreApiTrashbinRestore/trashbinRestore.feature:230](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L230) +- [coreApiTrashbinRestore/trashbinRestore.feature:231](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L231) +- 
[coreApiTrashbinRestore/trashbinRestore.feature:250](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L250) +- [coreApiTrashbinRestore/trashbinRestore.feature:251](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L251) +- [coreApiTrashbinRestore/trashbinRestore.feature:270](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L270) +- [coreApiTrashbinRestore/trashbinRestore.feature:271](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L271) +- [coreApiTrashbinRestore/trashbinRestore.feature:304](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L304) +- [coreApiTrashbinRestore/trashbinRestore.feature:305](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L305) +- [coreApiTrashbinRestore/trashbinRestore.feature:343](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L343) +- [coreApiTrashbinRestore/trashbinRestore.feature:344](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L344) +- [coreApiTrashbinRestore/trashbinRestore.feature:387](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L387) +- [coreApiTrashbinRestore/trashbinRestore.feature:388](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L388) +- [coreApiTrashbinRestore/trashbinRestore.feature:405](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L405) +- 
[coreApiTrashbinRestore/trashbinRestore.feature:406](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L406) +- [coreApiTrashbinRestore/trashbinRestore.feature:424](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L424) +- [coreApiTrashbinRestore/trashbinRestore.feature:425](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L425) +- [coreApiTrashbinRestore/trashbinRestore.feature:448](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L448) +- [coreApiTrashbinRestore/trashbinRestore.feature:449](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L449) +- [coreApiTrashbinRestore/trashbinRestore.feature:467](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L467) +- [coreApiTrashbinRestore/trashbinRestore.feature:468](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L468) +- [coreApiTrashbinRestore/trashbinRestore.feature:482](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L482) +- [coreApiTrashbinRestore/trashbinRestore.feature:483](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L483) +- [coreApiTrashbinRestore/trashbinRestore.feature:536](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L536) +- [coreApiTrashbinRestore/trashbinRestore.feature:537](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L537) +- 
[coreApiTrashbinRestore/trashbinRestore.feature:552](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L552) +- [coreApiTrashbinRestore/trashbinRestore.feature:553](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L553) +- [coreApiTrashbinRestore/trashbinRestore.feature:568](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L568) +- [coreApiTrashbinRestore/trashbinRestore.feature:569](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiTrashbinRestore/trashbinRestore.feature#L569) +- [coreApiWebdavEtagPropagation2/restoreFromTrash.feature:28](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavEtagPropagation2/restoreFromTrash.feature#L28) +- [coreApiWebdavEtagPropagation2/restoreFromTrash.feature:29](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavEtagPropagation2/restoreFromTrash.feature#L29) +- [coreApiWebdavEtagPropagation2/restoreFromTrash.feature:51](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavEtagPropagation2/restoreFromTrash.feature#L51) +- [coreApiWebdavEtagPropagation2/restoreFromTrash.feature:52](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavEtagPropagation2/restoreFromTrash.feature#L52) +- [coreApiWebdavEtagPropagation2/restoreFromTrash.feature:72](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavEtagPropagation2/restoreFromTrash.feature#L72) +- [coreApiWebdavEtagPropagation2/restoreFromTrash.feature:73](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavEtagPropagation2/restoreFromTrash.feature#L73) +- 
[coreApiWebdavEtagPropagation2/restoreFromTrash.feature:95](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavEtagPropagation2/restoreFromTrash.feature#L95) +- [coreApiWebdavEtagPropagation2/restoreFromTrash.feature:96](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavEtagPropagation2/restoreFromTrash.feature#L96) +- [coreApiWebdavEtagPropagation2/restoreVersion.feature:13](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavEtagPropagation2/restoreVersion.feature#L13) +- [coreApiWebdavMove2/moveFile.feature:141](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L141) +- [coreApiWebdavMove2/moveFile.feature:142](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L142) +- [coreApiWebdavMove2/moveFile.feature:147](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove2/moveFile.feature#L147) +- [coreApiWebdavOperations/listFiles.feature:241](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/listFiles.feature#L241) +- [coreApiWebdavOperations/listFiles.feature:242](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/listFiles.feature#L242) +- [coreApiWebdavOperations/listFiles.feature:247](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavOperations/listFiles.feature#L247) +- [coreApiWebdavMove1/moveFolder.feature:275](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove1/moveFolder.feature#L275) +- [coreApiWebdavMove1/moveFolder.feature:276](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove1/moveFolder.feature#L276) +- 
[coreApiWebdavMove1/moveFolder.feature:281](https://github.com/owncloud/ocis/blob/master/tests/acceptance/features/coreApiWebdavMove1/moveFolder.feature#L281) + +- Note: always keep an empty line at the end of this file. +The bash script that processes this file may not process a scenario reference on the last line. diff --git a/tests/oc-integration-tests/drone/storage-users-posixfs.toml b/tests/oc-integration-tests/drone/storage-users-posixfs.toml new file mode 100644 index 0000000000..a3f0b09b9c --- /dev/null +++ b/tests/oc-integration-tests/drone/storage-users-posixfs.toml @@ -0,0 +1,51 @@ +# This config file will start a reva service that: +# - uses the posix driver to serve users (/users) +# - serves the storage provider on grpc port 11000 +# - serves http dataprovider for this storage on port 11001 +# - /data - dataprovider: file upload and download + +[shared] +jwt_secret = "Pive-Fumkiu4" +gatewaysvc = "localhost:19000" + +[grpc] +address = "0.0.0.0:11000" + +# This is a storage provider that grants direct access to the wrapped storage +[grpc.services.storageprovider] +driver = "posix" +expose_data_server = true +data_server_url = "http://revad-services:11001/data" +gateway_addr = "0.0.0.0:19000" +mount_id = "1284d238-aa92-42ce-bdc4-0b0000009157" + +[grpc.services.storageprovider.drivers.posix] +root = "/drone/src/tmp/reva/data" +permissionssvc = "localhost:10000" +treetime_accounting = true +treesize_accounting = true +personalspacepath_template = "users/{{.User.Username}}" +generalspacepath_template = "projects/{{.SpaceId}}" + +[grpc.services.storageprovider.drivers.posix.idcache] +cache_store = "redis" +cache_nodes = [ "redis://redis:6379" ] + + +# we have a locally running dataprovider +[http] +address = "0.0.0.0:11001" + +[http.services.dataprovider] +driver = "posix" +temp_folder = "/drone/src/tmp/reva/tmp" + +[http.services.dataprovider.drivers.posix] +root = "/drone/src/tmp/reva/data" +permissionssvc = "localhost:10000" +treetime_accounting = true
+treesize_accounting = true + +[http.services.dataprovider.drivers.posix.idcache] +cache_store = "redis" +cache_nodes = [ "redis://redis:6379" ] \ No newline at end of file