diff --git a/changelog/unreleased/owncloudsql-storage-driver.md b/changelog/unreleased/owncloudsql-storage-driver.md new file mode 100644 index 0000000000..2ee9ff6a79 --- /dev/null +++ b/changelog/unreleased/owncloudsql-storage-driver.md @@ -0,0 +1,7 @@ +Enhancement: owncloudsql storage driver + +This PR adds a storage driver which connects to a oc10 storage backend +(storage + database). +This allows for running oc10 and ocis with the same backend in parallel. + +https://github.com/cs3org/reva/pull/1710 \ No newline at end of file diff --git a/pkg/storage/fs/loader/loader.go b/pkg/storage/fs/loader/loader.go index dd08dc6354..2ccb0d7c9b 100644 --- a/pkg/storage/fs/loader/loader.go +++ b/pkg/storage/fs/loader/loader.go @@ -28,6 +28,7 @@ import ( _ "github.com/cs3org/reva/pkg/storage/fs/localhome" _ "github.com/cs3org/reva/pkg/storage/fs/ocis" _ "github.com/cs3org/reva/pkg/storage/fs/owncloud" + _ "github.com/cs3org/reva/pkg/storage/fs/owncloudsql" _ "github.com/cs3org/reva/pkg/storage/fs/s3" _ "github.com/cs3org/reva/pkg/storage/fs/s3ng" // Add your own here diff --git a/pkg/storage/fs/owncloudsql/filecache/filecache.go b/pkg/storage/fs/owncloudsql/filecache/filecache.go new file mode 100644 index 0000000000..44f6fbddcb --- /dev/null +++ b/pkg/storage/fs/owncloudsql/filecache/filecache.go @@ -0,0 +1,502 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package filecache + +import ( + "crypto/md5" + "database/sql" + "encoding/hex" + "fmt" + "path/filepath" + "regexp" + "strconv" + "strings" + "time" + + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + conversions "github.com/cs3org/reva/internal/http/services/owncloud/ocs/conversions" + + "github.com/pkg/errors" + "github.com/rs/zerolog/log" +) + +// Cache represents a oc10-style file cache +type Cache struct { + driver string + db *sql.DB +} + +// NewMysql returns a new Cache instance connecting to a MySQL database +func NewMysql(dsn string) (*Cache, error) { + sqldb, err := sql.Open("mysql", dsn) + if err != nil { + return nil, errors.Wrap(err, "error connecting to the database") + } + sqldb.SetConnMaxLifetime(time.Minute * 3) + sqldb.SetMaxOpenConns(10) + sqldb.SetMaxIdleConns(10) + + err = sqldb.Ping() + if err != nil { + return nil, errors.Wrap(err, "error connecting to the database") + } + + return New("mysql", sqldb) +} + +// New returns a new Cache instance connecting to the given sql.DB +func New(driver string, sqldb *sql.DB) (*Cache, error) { + return &Cache{ + driver: driver, + db: sqldb, + }, nil +} + +// GetNumericStorageID returns the database id for the given storage +func (c *Cache) GetNumericStorageID(id string) (int, error) { + row := c.db.QueryRow("Select numeric_id from oc_storages where id = ?", id) + var nid int + switch err := row.Scan(&nid); err { + case nil: + return nid, nil + default: + return -1, err + } +} + +// File represents an entry of the file cache +type File struct { + ID int + Storage int + Parent int + MimePart int + MimeType int + Size int + MTime int + StorageMTime int + UnencryptedSize int + Permissions int + Encrypted bool + Path string + Name string + Etag string + Checksum string +} + +// 
TrashItem represents a trash item of the file cache +type TrashItem struct { + ID int + Name string + User string + Path string + Timestamp int +} + +// Scannable describes the interface providing a Scan method +type Scannable interface { + Scan(...interface{}) error +} + +func (c *Cache) rowToFile(row Scannable) (*File, error) { + var fileid, storage, parent, mimetype, mimepart, size, mtime, storageMtime, encrypted, unencryptedSize, permissions int + var path, name, etag, checksum string + err := row.Scan(&fileid, &storage, &path, &parent, &permissions, &mimetype, &mimepart, &size, &mtime, &storageMtime, &encrypted, &unencryptedSize, &name, &etag, &checksum) + if err != nil { + return nil, err + } + + return &File{ + ID: fileid, + Storage: storage, + Path: path, + Parent: parent, + Permissions: permissions, + MimeType: mimetype, + MimePart: mimepart, + Size: size, + MTime: mtime, + StorageMTime: storageMtime, + Encrypted: encrypted == 1, + UnencryptedSize: unencryptedSize, + Name: name, + Etag: etag, + Checksum: checksum, + }, nil +} + +// Get returns the cache entry for the specified storage/path +func (c *Cache) Get(s interface{}, p string) (*File, error) { + storageID, err := toIntID(s) + if err != nil { + return nil, err + } + + phashBytes := md5.Sum([]byte(p)) + phash := hex.EncodeToString(phashBytes[:]) + + row := c.db.QueryRow("Select fileid, storage, path, parent, permissions, mimetype, mimepart, size, mtime, storage_mtime, encrypted, unencrypted_size, name, etag, checksum from oc_filecache where path_hash = ? 
and storage = ?", phash, storageID) + return c.rowToFile(row) +} + +// Path returns the path for the specified entry +func (c *Cache) Path(id interface{}) (string, error) { + id, err := toIntID(id) + if err != nil { + return "", err + } + + row := c.db.QueryRow("Select path from oc_filecache where fileid = ?", id) + var path string + err = row.Scan(&path) + if err != nil { + return "", err + } + return path, nil +} + +// Permissions returns the permissions for the specified storage/path +func (c *Cache) Permissions(storage interface{}, p string) (*provider.ResourcePermissions, error) { + entry, err := c.Get(storage, p) + if err != nil { + return nil, err + } + + perms, err := conversions.NewPermissions(entry.Permissions) + if err != nil { + return nil, err + } + + return conversions.RoleFromOCSPermissions(perms).CS3ResourcePermissions(), nil +} + +// InsertOrUpdate creates or updates a cache entry +func (c *Cache) InsertOrUpdate(storage interface{}, data map[string]interface{}) (int, error) { + storageID, err := toIntID(storage) + if err != nil { + return -1, err + } + + columns := []string{"storage"} + placeholders := []string{"?"} + values := []interface{}{storage} + + for _, key := range []string{"path", "mimetype", "etag"} { + if _, exists := data[key]; !exists { + return -1, fmt.Errorf("missing required data") + } + } + + path := data["path"].(string) + parentPath := strings.TrimRight(filepath.Dir(path), "/") + if parentPath == "." 
{ + parentPath = "" + } + parent, err := c.Get(storageID, parentPath) + if err != nil { + return -1, fmt.Errorf("could not find parent %s, %s, %v, %w", parentPath, path, parent, err) + } + data["parent"] = parent.ID + data["name"] = filepath.Base(path) + if _, exists := data["checksum"]; !exists { + data["checksum"] = "" + } + + for k, v := range data { + switch k { + case "path": + phashBytes := md5.Sum([]byte(v.(string))) + phash := hex.EncodeToString(phashBytes[:]) + columns = append(columns, "path_hash") + values = append(values, phash) + placeholders = append(placeholders, "?") + case "storage_mtime": + if _, exists := data["mtime"]; !exists { + columns = append(columns, "mtime") + values = append(values, v) + placeholders = append(placeholders, "?") + } + case "mimetype": + parts := strings.Split(v.(string), "/") + columns = append(columns, "mimetype") + values = append(values, v) + placeholders = append(placeholders, "(SELECT id from oc_mimetypes where mimetype=?)") + columns = append(columns, "mimepart") + values = append(values, parts[0]) + placeholders = append(placeholders, "(SELECT id from oc_mimetypes where mimetype=?)") + continue + } + + columns = append(columns, k) + values = append(values, v) + placeholders = append(placeholders, "?") + } + + err = c.InsertMimetype(data["mimetype"].(string)) + if err != nil { + return -1, err + } + + query := "INSERT INTO oc_filecache( " + strings.Join(columns, ", ") + ") VALUES(" + strings.Join(placeholders, ",") + ")" + + updates := []string{} + for i, column := range columns { + if column != "path" && column != "path_hash" && column != "storage" { + updates = append(updates, column+"="+placeholders[i]) + values = append(values, values[i]) + } + } + if c.driver == "mysql" { // mysql upsert + query += " ON DUPLICATE KEY UPDATE " + } else { // sqlite3 upsert + query += " ON CONFLICT(storage,path_hash) DO UPDATE SET " + } + query += strings.Join(updates, ",") + + stmt, err := c.db.Prepare(query) + if err != nil { + 
return -1, err + } + + res, err := stmt.Exec(values...) + if err != nil { + log.Err(err).Msg("could not store filecache item") + return -1, err + } + id, err := res.LastInsertId() + if err != nil { + return -1, err + } + return int(id), nil +} + +// Copy creates a copy of the specified entry at the target path +func (c *Cache) Copy(storage interface{}, sourcePath, targetPath string) (int, error) { + storageID, err := toIntID(storage) + if err != nil { + return -1, err + } + source, err := c.Get(storageID, sourcePath) + if err != nil { + return -1, errors.Wrap(err, "could not find source") + } + + row := c.db.QueryRow("SELECT mimetype FROM oc_mimetypes WHERE id=?", source.MimeType) + var mimetype string + err = row.Scan(&mimetype) + if err != nil { + return -1, errors.Wrap(err, "could not find source mimetype") + } + + data := map[string]interface{}{ + "path": targetPath, + "checksum": source.Checksum, + "mimetype": mimetype, + "permissions": source.Permissions, + "etag": source.Etag, + "size": source.Size, + "mtime": source.MTime, + "storage_mtime": source.StorageMTime, + "encrypted": source.Encrypted, + "unencrypted_size": source.UnencryptedSize, + } + return c.InsertOrUpdate(storage, data) +} + +// Move moves the specified entry to the target path +func (c *Cache) Move(storage interface{}, sourcePath, targetPath string) error { + storageID, err := toIntID(storage) + if err != nil { + return err + } + source, err := c.Get(storageID, sourcePath) + if err != nil { + return errors.Wrap(err, "could not find source") + } + newParentPath := strings.TrimRight(filepath.Dir(targetPath), "/") + newParent, err := c.Get(storageID, newParentPath) + if err != nil { + return errors.Wrap(err, "could not find new parent") + } + + tx, err := c.db.Begin() + if err != nil { + return err + } + defer func() { _ = tx.Rollback() }() + stmt, err := tx.Prepare("UPDATE oc_filecache SET parent=?, path=?, name=?, path_hash=? WHERE storage = ? 
and fileid=?") + if err != nil { + return err + } + defer stmt.Close() + phashBytes := md5.Sum([]byte(targetPath)) + _, err = stmt.Exec(newParent.ID, targetPath, filepath.Base(targetPath), hex.EncodeToString(phashBytes[:]), storageID, source.ID) + if err != nil { + return err + } + + childRows, err := tx.Query("SELECT fileid, path from oc_filecache where parent = ?", source.ID) + if err != nil { + return err + } + defer childRows.Close() + children := map[int]string{} + for childRows.Next() { + var ( + id int + path string + ) + err = childRows.Scan(&id, &path) + if err != nil { + return err + } + + children[id] = path + } + for id, path := range children { + path = strings.ReplaceAll(path, sourcePath, targetPath) + phashBytes = md5.Sum([]byte(path)) + _, err = stmt.Exec(source.ID, path, filepath.Base(path), hex.EncodeToString(phashBytes[:]), storageID, id) + if err != nil { + return err + } + } + + return tx.Commit() +} + +// Delete removes the specified storage/path from the cache +func (c *Cache) Delete(storage interface{}, user, path, trashPath string) error { + err := c.Move(storage, path, trashPath) + if err != nil { + return err + } + + re := regexp.MustCompile(`(.*)\.d(\d+)$`) + parts := re.FindStringSubmatch(filepath.Base(trashPath)) + + query := "INSERT INTO oc_files_trash(user,id,timestamp,location) VALUES(?,?,?,?)" + stmt, err := c.db.Prepare(query) + if err != nil { + return err + } + + relativeLocation, err := filepath.Rel("files/", filepath.Dir(path)) + if err != nil { + return err + } + _, err = stmt.Exec(user, filepath.Base(parts[1]), parts[2], relativeLocation) + if err != nil { + log.Err(err).Msg("could not store filecache item") + return err + } + + return nil +} + +// GetRecycleItem returns the specified recycle item +func (c *Cache) GetRecycleItem(user, path string, timestamp int) (*TrashItem, error) { + row := c.db.QueryRow("SELECT auto_id, id, location FROM oc_files_trash WHERE id = ? and user = ? 
and timestamp = ?", path, user, timestamp) + var autoID int + var id, location string + err := row.Scan(&autoID, &id, &location) + if err != nil { + return nil, err + } + + return &TrashItem{ + ID: autoID, + Name: id, + User: user, + Path: location, + Timestamp: timestamp, + }, nil +} + +// PurgeRecycleItem deletes the specified item from the cache +func (c *Cache) PurgeRecycleItem(user, path string, timestamp int) error { + row := c.db.QueryRow("Select auto_id, location from oc_files_trash where id = ? and user = ? and timestamp = ?", path, user, timestamp) + var autoID int + var location string + err := row.Scan(&autoID, &location) + if err != nil { + return err + } + + _, err = c.db.Exec("DELETE FROM oc_files_trash WHERE auto_id=?", autoID) + if err != nil { + return err + } + + storage, err := c.GetNumericStorageID("home::" + user) + if err != nil { + return err + } + item, err := c.Get(storage, filepath.Join("files_trashbin", "files", location, path+".d"+strconv.Itoa(timestamp))) + if err != nil { + return err + } + _, err = c.db.Exec("DELETE FROM oc_filecache WHERE fileid=? OR parent=?", item.ID, item.ID) + + return err +} + +// SetEtag set a new etag for the specified item +func (c *Cache) SetEtag(storage interface{}, path, etag string) error { + storageID, err := toIntID(storage) + if err != nil { + return err + } + source, err := c.Get(storageID, path) + if err != nil { + return errors.Wrap(err, "could not find source") + } + stmt, err := c.db.Prepare("UPDATE oc_filecache SET etag=? WHERE storage = ? 
and fileid=?") + if err != nil { + return err + } + _, err = stmt.Exec(etag, storageID, source.ID) + return err +} + +// InsertMimetype adds a new mimetype to the database +func (c *Cache) InsertMimetype(mimetype string) error { + stmt, err := c.db.Prepare("INSERT INTO oc_mimetypes(mimetype) VALUES(?)") + if err != nil { + return err + } + _, err = stmt.Exec(mimetype) + if err != nil { + if strings.Contains(err.Error(), "UNIQUE") || strings.Contains(err.Error(), "Error 1062") { + return nil // Already exists + } + return err + } + return nil +} + +func toIntID(rid interface{}) (int, error) { + switch t := rid.(type) { + case int: + return t, nil + case string: + return strconv.Atoi(t) + default: + return -1, fmt.Errorf("invalid type") + } +} diff --git a/pkg/storage/fs/owncloudsql/filecache/filecache_suite_test.go b/pkg/storage/fs/owncloudsql/filecache/filecache_suite_test.go new file mode 100644 index 0000000000..822a699bca --- /dev/null +++ b/pkg/storage/fs/owncloudsql/filecache/filecache_suite_test.go @@ -0,0 +1,31 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package filecache_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestFilecache(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Filecache Suite") +} diff --git a/pkg/storage/fs/owncloudsql/filecache/filecache_test.go b/pkg/storage/fs/owncloudsql/filecache/filecache_test.go new file mode 100644 index 0000000000..8df48d4470 --- /dev/null +++ b/pkg/storage/fs/owncloudsql/filecache/filecache_test.go @@ -0,0 +1,468 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package filecache_test + +import ( + "database/sql" + "io/ioutil" + "os" + "strconv" + + _ "github.com/mattn/go-sqlite3" + + "github.com/cs3org/reva/pkg/storage/fs/owncloudsql/filecache" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Filecache", func() { + var ( + cache *filecache.Cache + testDbFile *os.File + sqldb *sql.DB + ) + + BeforeEach(func() { + var err error + testDbFile, err = ioutil.TempFile("", "example") + Expect(err).ToNot(HaveOccurred()) + + dbData, err := ioutil.ReadFile("test.db") + Expect(err).ToNot(HaveOccurred()) + + _, err = testDbFile.Write(dbData) + Expect(err).ToNot(HaveOccurred()) + err = testDbFile.Close() + Expect(err).ToNot(HaveOccurred()) + + sqldb, err = sql.Open("sqlite3", testDbFile.Name()) + Expect(err).ToNot(HaveOccurred()) + + cache, err = filecache.New("sqlite3", sqldb) + Expect(err).ToNot(HaveOccurred()) + }) + + AfterEach(func() { + os.Remove(testDbFile.Name()) + }) + + Describe("GetNumericStorageID", func() { + It("returns the proper storage id", func() { + nid, err := cache.GetNumericStorageID("home::admin") + Expect(err).ToNot(HaveOccurred()) + Expect(nid).To(Equal(1)) + }) + }) + + Describe("Get", func() { + It("gets existing files", func() { + path := "files/Photos/Portugal.jpg" + file, err := cache.Get(1, path) + Expect(err).ToNot(HaveOccurred()) + Expect(file).ToNot(BeNil()) + Expect(file.ID).To(Equal(10)) + Expect(file.Storage).To(Equal(1)) + Expect(file.Path).To(Equal(path)) + Expect(file.Parent).To(Equal(9)) + Expect(file.MimeType).To(Equal(6)) + Expect(file.MimePart).To(Equal(5)) + Expect(file.Size).To(Equal(243733)) + Expect(file.MTime).To(Equal(1619007009)) + Expect(file.StorageMTime).To(Equal(1619007009)) + Expect(file.Encrypted).To(BeFalse()) + Expect(file.UnencryptedSize).To(Equal(0)) + Expect(file.Name).To(Equal("Portugal.jpg")) + Expect(file.Etag).To(Equal("13cf411aefccd7183d3b117ccd0ac5f8")) + Expect(file.Checksum).To(Equal("SHA1:872adcabcb4e06bea6265200c0d71b12defe2df1 MD5:01b38c622feac31652d738a94e15e86b ADLER32:6959358d")) + }) + }) + + Describe("Path", func() { + It("returns the path", func() { + path, err := cache.Path(10) + Expect(err).ToNot(HaveOccurred()) + 
Expect(path).To(Equal("files/Photos/Portugal.jpg")) + }) + + It("returns the path when given a string id", func() { + path, err := cache.Path("10") + Expect(err).ToNot(HaveOccurred()) + Expect(path).To(Equal("files/Photos/Portugal.jpg")) + }) + }) + + Describe("InsertOrUpdate", func() { + Context("when inserting a new recored", func() { + It("checks for required fields", func() { + data := map[string]interface{}{ + "mimetype": "httpd/unix-directory", + "etag": "abcdefg", + } + _, err := cache.InsertOrUpdate(3, data) + Expect(err).To(MatchError("missing required data")) + + data = map[string]interface{}{ + "path": "files/Photos/foo.jpg", + "etag": "abcdefg", + } + _, err = cache.InsertOrUpdate(3, data) + Expect(err).To(MatchError("missing required data")) + + data = map[string]interface{}{ + "path": "files/Photos/foo.jpg", + "mimetype": "httpd/unix-directory", + } + _, err = cache.InsertOrUpdate(3, data) + Expect(err).To(MatchError("missing required data")) + }) + + It("inserts a new minimal entry", func() { + data := map[string]interface{}{ + "path": "files/Photos/foo.jpg", + "mimetype": "httpd/unix-directory", + "etag": "abcdefg", + } + id, err := cache.InsertOrUpdate(1, data) + Expect(err).ToNot(HaveOccurred()) + Expect(id).To(Equal(18)) + + entry, err := cache.Get(1, "files/Photos/foo.jpg") + Expect(err).ToNot(HaveOccurred()) + Expect(entry.Path).To(Equal("files/Photos/foo.jpg")) + Expect(entry.Name).To(Equal("foo.jpg")) + Expect(entry.MimeType).To(Equal(2)) + Expect(entry.MimePart).To(Equal(1)) + Expect(entry.Etag).To(Equal("abcdefg")) + }) + + It("inserts a complete entry", func() { + data := map[string]interface{}{ + "path": "files/Photos/foo.jpg", + "checksum": "SHA1: abcdefg", + "etag": "abcdefg", + "size": 1234, + "mimetype": "image/jpeg", + "mtime": 1617702482, + "storage_mtime": 1617702483, + "encrypted": true, + "unencrypted_size": 2000, + } + _, err := cache.InsertOrUpdate(1, data) + Expect(err).ToNot(HaveOccurred()) + + entry, err := cache.Get(1, 
"files/Photos/foo.jpg") + Expect(err).ToNot(HaveOccurred()) + Expect(entry.Path).To(Equal("files/Photos/foo.jpg")) + Expect(entry.Name).To(Equal("foo.jpg")) + Expect(entry.Checksum).To(Equal("SHA1: abcdefg")) + Expect(entry.Etag).To(Equal("abcdefg")) + Expect(entry.Size).To(Equal(1234)) + Expect(entry.MimeType).To(Equal(6)) + Expect(entry.MimePart).To(Equal(5)) + Expect(entry.MTime).To(Equal(1617702482)) + Expect(entry.StorageMTime).To(Equal(1617702483)) + Expect(entry.Encrypted).To(BeTrue()) + Expect(entry.UnencryptedSize).To(Equal(2000)) + }) + + It("sets the parent", func() { + data := map[string]interface{}{ + "path": "files/Photos/foo.jpg", + "mimetype": "httpd/unix-directory", + "etag": "abcdefg", + } + + _, err := cache.InsertOrUpdate(1, data) + Expect(err).ToNot(HaveOccurred()) + + entry, err := cache.Get(1, "files/Photos/foo.jpg") + Expect(err).ToNot(HaveOccurred()) + Expect(entry.Parent).To(Equal(9)) + }) + + It("sets the mtime storage_mtime if not set", func() { + data := map[string]interface{}{ + "path": "files/Photos/foo.jpg", + "mimetype": "httpd/unix-directory", + "etag": "abcdefg", + "storage_mtime": 1617702483, + } + + _, err := cache.InsertOrUpdate(1, data) + Expect(err).ToNot(HaveOccurred()) + + entry, err := cache.Get(1, "files/Photos/foo.jpg") + Expect(err).ToNot(HaveOccurred()) + Expect(entry.MTime).To(Equal(1617702483)) + }) + + It("sets the mimetype and part ids from the mimetype string", func() { + data := map[string]interface{}{ + "path": "files/Photos/foo.jpg", + "checksum": "SHA1: abcdefg", + "etag": "abcdefg", + "mimetype": "image/jpeg", + } + + _, err := cache.InsertOrUpdate(1, data) + Expect(err).ToNot(HaveOccurred()) + + entry, err := cache.Get(1, "files/Photos/foo.jpg") + Expect(err).ToNot(HaveOccurred()) + Expect(entry.MimeType).To(Equal(6)) + Expect(entry.MimePart).To(Equal(5)) + }) + + It("adds unknown mimetypes to the database", func() { + data := map[string]interface{}{ + "path": "files/Photos/foo.tiff", + "checksum": "SHA1: 
abcdefg", + "etag": "abcdefg", + "mimetype": "image/tiff", + } + + _, err := cache.InsertOrUpdate(1, data) + Expect(err).ToNot(HaveOccurred()) + + entry, err := cache.Get(1, "files/Photos/foo.tiff") + Expect(err).ToNot(HaveOccurred()) + Expect(entry.MimeType).To(Equal(9)) + Expect(entry.MimePart).To(Equal(5)) + }) + }) + + Context("when updating an existing record", func() { + var ( + data map[string]interface{} + ) + + BeforeEach(func() { + data = map[string]interface{}{ + "path": "files/Photos/foo.jpg", + "mimetype": "httpd/unix-directory", + "etag": "abcdefg", + } + _, err := cache.InsertOrUpdate(1, data) + Expect(err).ToNot(HaveOccurred()) + }) + + It("updates the record", func() { + recordBefore, err := cache.Get(1, data["path"].(string)) + Expect(err).ToNot(HaveOccurred()) + + data["etag"] = "12345" + id, err := cache.InsertOrUpdate(1, data) + Expect(err).ToNot(HaveOccurred()) + Expect(id).To(Equal(recordBefore.ID)) + + recordAfter, err := cache.Get(1, data["path"].(string)) + Expect(err).ToNot(HaveOccurred()) + + Expect(recordBefore.Etag).To(Equal("abcdefg")) + Expect(recordAfter.Etag).To(Equal("12345")) + }) + + }) + }) + + Describe("Move", func() { + It("moves a file", func() { + err := cache.Move(1, "files/Photos/Portugal.jpg", "files/Documents/Portugal.jpg") + Expect(err).ToNot(HaveOccurred()) + + _, err = cache.Get(1, "files/Photos/Portugal.jpg") + Expect(err).To(HaveOccurred()) + + newEntry, err := cache.Get(1, "files/Documents/Portugal.jpg") + Expect(err).ToNot(HaveOccurred()) + Expect(newEntry.Path).To(Equal("files/Documents/Portugal.jpg")) + }) + + It("moves a file while changing its name", func() { + err := cache.Move(1, "files/Photos/Portugal.jpg", "files/Documents/Spain.jpg") + Expect(err).ToNot(HaveOccurred()) + + _, err = cache.Get(1, "files/Photos/Portugal.jpg") + Expect(err).To(HaveOccurred()) + + newEntry, err := cache.Get(1, "files/Documents/Spain.jpg") + Expect(err).ToNot(HaveOccurred()) + 
Expect(newEntry.Path).To(Equal("files/Documents/Spain.jpg")) + Expect(newEntry.Name).To(Equal("Spain.jpg")) + }) + + It("moves a directory", func() { + err := cache.Move(1, "files/Photos", "files/Foo") + Expect(err).ToNot(HaveOccurred()) + + _, err = cache.Get(1, "files/Photos") + Expect(err).To(HaveOccurred()) + + _, err = cache.Get(1, "files/Photos/Portugal.jpg") + Expect(err).To(HaveOccurred()) + newEntry, err := cache.Get(1, "files/Foo/Portugal.jpg") + Expect(err).ToNot(HaveOccurred()) + Expect(newEntry.Path).To(Equal("files/Foo/Portugal.jpg")) + }) + }) + + Describe("SetEtag", func() { + It("updates the etag", func() { + entry, err := cache.Get(1, "files/Photos/Portugal.jpg") + Expect(err).ToNot(HaveOccurred()) + Expect(entry.Etag).To(Equal("13cf411aefccd7183d3b117ccd0ac5f8")) + + err = cache.SetEtag(1, "files/Photos/Portugal.jpg", "foo") + Expect(err).ToNot(HaveOccurred()) + + entry, err = cache.Get(1, "files/Photos/Portugal.jpg") + Expect(err).ToNot(HaveOccurred()) + Expect(entry.Etag).To(Equal("foo")) + }) + }) + + Context("trash", func() { + var ( + filePath = "files/Photos/Portugal.jpg" + + data = map[string]interface{}{ + "path": "files_trashbin/files/Photos", + "mimetype": "httpd/unix-directory", + "etag": "abcdefg", + } + trashPathBase = "Portugal.jpg" + trashPathTimestamp = 1619007109 + trashPath = "files_trashbin/files/Photos/" + trashPathBase + ".d" + strconv.Itoa(trashPathTimestamp) + ) + + BeforeEach(func() { + _, err := cache.InsertOrUpdate(1, data) + Expect(err).ToNot(HaveOccurred()) + }) + + Describe("Delete", func() { + It("deletes an item", func() { + err := cache.Delete(1, "admin", filePath, trashPath) + Expect(err).ToNot(HaveOccurred()) + + _, err = cache.Get(1, "files/Photos/Portugal.jpg") + Expect(err).To(HaveOccurred()) + _, err = cache.Get(1, "files_trashbin/files/Photos/Portugal.jpg.d1619007109") + Expect(err).ToNot(HaveOccurred()) + }) + + It("creates an entry in the trash table", func() { + _, err := cache.GetRecycleItem("admin", 
trashPathBase, trashPathTimestamp) + Expect(err).To(HaveOccurred()) + + err = cache.Delete(1, "admin", filePath, trashPath) + Expect(err).ToNot(HaveOccurred()) + + item, err := cache.GetRecycleItem("admin", trashPathBase, trashPathTimestamp) + Expect(err).ToNot(HaveOccurred()) + Expect(item.Path).To(Equal("Photos")) + }) + + It("rewrites the path of the children", func() { + err := cache.Delete(1, "admin", "files/Photos", "files_trashbin/files/Photos.d1619007109") + Expect(err).ToNot(HaveOccurred()) + }) + }) + + Describe("PurgeRecycleItem", func() { + It("removes the item from the database", func() { + err := cache.Delete(1, "admin", filePath, trashPath) + Expect(err).ToNot(HaveOccurred()) + + _, err = cache.GetRecycleItem("admin", trashPathBase, trashPathTimestamp) + Expect(err).ToNot(HaveOccurred()) + + err = cache.PurgeRecycleItem("admin", trashPathBase, trashPathTimestamp) + Expect(err).ToNot(HaveOccurred()) + + _, err = cache.GetRecycleItem("admin", trashPathBase, trashPathTimestamp) + Expect(err).To(HaveOccurred()) + }) + + It("removes the item from the filecache table", func() { + err := cache.Delete(1, "admin", filePath, trashPath) + Expect(err).ToNot(HaveOccurred()) + + err = cache.PurgeRecycleItem("admin", trashPathBase, trashPathTimestamp) + Expect(err).ToNot(HaveOccurred()) + + _, err = cache.Get(1, "files_trashbin/files/Photos.d1619007109") + Expect(err).To(HaveOccurred()) + }) + + It("removes children from the filecache table", func() { + err := cache.Delete(1, "admin", "files/Photos", "files_trashbin/files/Photos.d1619007109") + Expect(err).ToNot(HaveOccurred()) + + _, err = cache.Get(1, "files_trashbin/files/Photos.d1619007109/Portugal.jpg") + Expect(err).ToNot(HaveOccurred()) + + err = cache.PurgeRecycleItem("admin", "Photos", 1619007109) + Expect(err).ToNot(HaveOccurred()) + + _, err = cache.Get(1, "files_trashbin/files/Photos.d1619007109/Portugal.jpg") + Expect(err).To(HaveOccurred()) + }) + }) + }) + + Describe("Copy", func() { + It("copies the 
entry", func() { + for _, dir := range []string{"files_versions", "files_versions/Photos"} { + parentData := map[string]interface{}{ + "path": dir, + "mimetype": "httpd/unix-directory", + "etag": "abcdefg", + } + _, err := cache.InsertOrUpdate(1, parentData) + Expect(err).ToNot(HaveOccurred()) + } + + existingEntry, err := cache.Get(1, "files/Photos/Portugal.jpg") + Expect(err).ToNot(HaveOccurred()) + _, err = cache.Copy(1, "files/Photos/Portugal.jpg", "files_versions/Photos/Portugal.jpg.v1619528083") + Expect(err).ToNot(HaveOccurred()) + + newEntry, err := cache.Get(1, "files_versions/Photos/Portugal.jpg.v1619528083") + Expect(err).ToNot(HaveOccurred()) + Expect(newEntry.ID).ToNot(Equal(existingEntry.ID)) + Expect(newEntry.MimeType).To(Equal(existingEntry.MimeType)) + }) + }) + + Describe("Permissions", func() { + It("returns the permissions", func() { + perms, err := cache.Permissions(1, "files/Photos/Portugal.jpg") + Expect(err).ToNot(HaveOccurred()) + Expect(perms).ToNot(BeNil()) + Expect(perms.InitiateFileUpload).To(BeTrue()) + + perms, err = cache.Permissions(1, "files/Photos/Teotihuacan.jpg") + Expect(err).ToNot(HaveOccurred()) + Expect(perms).ToNot(BeNil()) + Expect(perms.InitiateFileUpload).To(BeFalse()) + }) + }) +}) diff --git a/pkg/storage/fs/owncloudsql/filecache/test.db b/pkg/storage/fs/owncloudsql/filecache/test.db new file mode 100644 index 0000000000..913b70f860 Binary files /dev/null and b/pkg/storage/fs/owncloudsql/filecache/test.db differ diff --git a/pkg/storage/fs/owncloudsql/owncloudsql.go b/pkg/storage/fs/owncloudsql/owncloudsql.go new file mode 100644 index 0000000000..2b7b871398 --- /dev/null +++ b/pkg/storage/fs/owncloudsql/owncloudsql.go @@ -0,0 +1,2202 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +package owncloudsql + +import ( + "context" + "crypto/md5" + "crypto/sha1" + "fmt" + "hash/adler32" + "io" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "syscall" + "time" + + userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" + rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" + provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" + types "github.com/cs3org/go-cs3apis/cs3/types/v1beta1" + "github.com/cs3org/reva/internal/grpc/services/storageprovider" + "github.com/cs3org/reva/pkg/appctx" + "github.com/cs3org/reva/pkg/errtypes" + "github.com/cs3org/reva/pkg/logger" + "github.com/cs3org/reva/pkg/mime" + "github.com/cs3org/reva/pkg/rgrpc/todo/pool" + "github.com/cs3org/reva/pkg/sharedconf" + "github.com/cs3org/reva/pkg/storage" + "github.com/cs3org/reva/pkg/storage/fs/owncloudsql/filecache" + "github.com/cs3org/reva/pkg/storage/fs/registry" + "github.com/cs3org/reva/pkg/storage/utils/ace" + "github.com/cs3org/reva/pkg/storage/utils/chunking" + "github.com/cs3org/reva/pkg/storage/utils/templates" + "github.com/cs3org/reva/pkg/user" + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" + "github.com/pkg/xattr" + "github.com/rs/zerolog/log" +) + +const ( + // Currently,extended file attributes have four separated + // namespaces (user, trusted, security and system) 
followed by a dot. + // A non root user can only manipulate the user. namespace, which is what + // we will use to store ownCloud specific metadata. To prevent name + // collisions with other apps We are going to introduce a sub namespace + // "user.oc." + ocPrefix string = "user.oc." + + // SharePrefix is the prefix for sharing related extended attributes + sharePrefix string = ocPrefix + "grant." // grants are similar to acls, but they are not propagated down the tree when being changed + trashOriginPrefix string = ocPrefix + "o" + mdPrefix string = ocPrefix + "md." // arbitrary metadata + favPrefix string = ocPrefix + "fav." // favorite flag, per user + etagPrefix string = ocPrefix + "etag." // allow overriding a calculated etag with one from the extended attributes + checksumsKey string = "http://owncloud.org/ns/checksums" +) + +var defaultPermissions *provider.ResourcePermissions = &provider.ResourcePermissions{ + // no permissions +} +var ownerPermissions *provider.ResourcePermissions = &provider.ResourcePermissions{ + // all permissions + AddGrant: true, + CreateContainer: true, + Delete: true, + GetPath: true, + GetQuota: true, + InitiateFileDownload: true, + InitiateFileUpload: true, + ListContainer: true, + ListFileVersions: true, + ListGrants: true, + ListRecycle: true, + Move: true, + PurgeRecycle: true, + RemoveGrant: true, + RestoreFileVersion: true, + RestoreRecycleItem: true, + Stat: true, + UpdateGrant: true, +} + +func init() { + registry.Register("owncloudsql", New) +} + +type config struct { + DataDirectory string `mapstructure:"datadirectory"` + UploadInfoDir string `mapstructure:"upload_info_dir"` + DeprecatedShareDirectory string `mapstructure:"sharedirectory"` + ShareFolder string `mapstructure:"share_folder"` + UserLayout string `mapstructure:"user_layout"` + Redis string `mapstructure:"redis"` + EnableHome bool `mapstructure:"enable_home"` + Scan bool `mapstructure:"scan"` + UserProviderEndpoint string `mapstructure:"userprovidersvc"` + 
DbUsername string `mapstructure:"dbusername"` + DbPassword string `mapstructure:"dbpassword"` + DbHost string `mapstructure:"dbhost"` + DbPort int `mapstructure:"dbport"` + DbName string `mapstructure:"dbname"` +} + +func parseConfig(m map[string]interface{}) (*config, error) { + c := &config{} + if err := mapstructure.Decode(m, c); err != nil { + err = errors.Wrap(err, "error decoding conf") + return nil, err + } + return c, nil +} + +func (c *config) init(m map[string]interface{}) { + if c.Redis == "" { + c.Redis = ":6379" + } + if c.UserLayout == "" { + c.UserLayout = "{{.Username}}" + } + if c.UploadInfoDir == "" { + c.UploadInfoDir = "/var/tmp/reva/uploadinfo" + } + // fallback for old config + if c.DeprecatedShareDirectory != "" { + c.ShareFolder = c.DeprecatedShareDirectory + } + if c.ShareFolder == "" { + c.ShareFolder = "/Shares" + } + // ensure share folder always starts with slash + c.ShareFolder = filepath.Join("/", c.ShareFolder) + + // default to scanning if not configured + if _, ok := m["scan"]; !ok { + c.Scan = true + } + c.UserProviderEndpoint = sharedconf.GetGatewaySVC(c.UserProviderEndpoint) +} + +// New returns an implementation to of the storage.FS interface that talk to +// a local filesystem. +func New(m map[string]interface{}) (storage.FS, error) { + c, err := parseConfig(m) + if err != nil { + return nil, err + } + c.init(m) + + // c.DataDirectory should never end in / unless it is the root? + c.DataDirectory = filepath.Clean(c.DataDirectory) + + // create datadir if it does not exist + err = os.MkdirAll(c.DataDirectory, 0700) + if err != nil { + logger.New().Error().Err(err). + Str("path", c.DataDirectory). + Msg("could not create datadir") + } + + err = os.MkdirAll(c.UploadInfoDir, 0700) + if err != nil { + logger.New().Error().Err(err). + Str("path", c.UploadInfoDir). 
+ Msg("could not create uploadinfo dir") + } + + dbSource := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s", c.DbUsername, c.DbPassword, c.DbHost, c.DbPort, c.DbName) + filecache, err := filecache.NewMysql(dbSource) + if err != nil { + return nil, err + } + + return &ocfs{ + c: c, + chunkHandler: chunking.NewChunkHandler(c.UploadInfoDir), + filecache: filecache, + }, nil +} + +type ocfs struct { + c *config + chunkHandler *chunking.ChunkHandler + filecache *filecache.Cache +} + +func (fs *ocfs) Shutdown(ctx context.Context) error { + return nil +} + +// owncloud stores files in the files subfolder +// the incoming path starts with /, so we need to insert the files subfolder into the path +// and prefix the data directory +// TODO the path handed to a storage provider should not contain the username +func (fs *ocfs) toInternalPath(ctx context.Context, sp string) (ip string) { + if fs.c.EnableHome { + u := user.ContextMustGetUser(ctx) + layout := templates.WithUser(u, fs.c.UserLayout) + ip = filepath.Join(fs.c.DataDirectory, layout, "files", sp) + } else { + // trim all / + sp = strings.Trim(sp, "/") + // p = "" or + // p = or + // p = /foo/bar.txt + segments := strings.SplitN(sp, "/", 2) + + if len(segments) == 1 && segments[0] == "" { + ip = fs.c.DataDirectory + return + } + + // parts[0] contains the username or userid. + u, err := fs.getUser(ctx, segments[0]) + if err != nil { + // TODO return invalid internal path? 
+ return + } + layout := templates.WithUser(u, fs.c.UserLayout) + + if len(segments) == 1 { + // parts = "" + ip = filepath.Join(fs.c.DataDirectory, layout, "files") + } else { + // parts = "", "foo/bar.txt" + ip = filepath.Join(fs.c.DataDirectory, layout, "files", segments[1]) + } + + } + return +} + +func (fs *ocfs) toInternalShadowPath(ctx context.Context, sp string) (internal string) { + if fs.c.EnableHome { + u := user.ContextMustGetUser(ctx) + layout := templates.WithUser(u, fs.c.UserLayout) + internal = filepath.Join(fs.c.DataDirectory, layout, "shadow_files", sp) + } else { + // trim all / + sp = strings.Trim(sp, "/") + // p = "" or + // p = or + // p = /foo/bar.txt + segments := strings.SplitN(sp, "/", 2) + + if len(segments) == 1 && segments[0] == "" { + internal = fs.c.DataDirectory + return + } + + // parts[0] contains the username or userid. + u, err := fs.getUser(ctx, segments[0]) + if err != nil { + // TODO return invalid internal path? + return + } + layout := templates.WithUser(u, fs.c.UserLayout) + + if len(segments) == 1 { + // parts = "" + internal = filepath.Join(fs.c.DataDirectory, layout, "shadow_files") + } else { + // parts = "", "foo/bar.txt" + internal = filepath.Join(fs.c.DataDirectory, layout, "shadow_files", segments[1]) + } + } + return +} + +// ownloud stores versions in the files_versions subfolder +// the incoming path starts with /, so we need to insert the files subfolder into the path +// and prefix the data directory +// TODO the path handed to a storage provider should not contain the username +func (fs *ocfs) getVersionsPath(ctx context.Context, ip string) string { + // ip = /path/to/data//files/foo/bar.txt + // remove data dir + if fs.c.DataDirectory != "/" { + // fs.c.DataDirectory is a clean path, so it never ends in / + ip = strings.TrimPrefix(ip, fs.c.DataDirectory) + } + // ip = //files/foo/bar.txt + parts := strings.SplitN(ip, "/", 4) + + // parts[1] contains the username or userid. 
+ u, err := fs.getUser(ctx, parts[1]) + if err != nil { + // TODO return invalid internal path? + return "" + } + layout := templates.WithUser(u, fs.c.UserLayout) + + switch len(parts) { + case 3: + // parts = "", "" + return filepath.Join(fs.c.DataDirectory, layout, "files_versions") + case 4: + // parts = "", "", "foo/bar.txt" + return filepath.Join(fs.c.DataDirectory, layout, "files_versions", parts[3]) + default: + return "" // TODO Must not happen? + } + +} + +// owncloud stores trashed items in the files_trashbin subfolder of a users home +func (fs *ocfs) getRecyclePath(ctx context.Context) (string, error) { + u, ok := user.ContextGetUser(ctx) + if !ok { + err := errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx") + return "", err + } + layout := templates.WithUser(u, fs.c.UserLayout) + return filepath.Join(fs.c.DataDirectory, layout, "files_trashbin/files"), nil +} + +func (fs *ocfs) getVersionRecyclePath(ctx context.Context) (string, error) { + u, ok := user.ContextGetUser(ctx) + if !ok { + err := errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx") + return "", err + } + layout := templates.WithUser(u, fs.c.UserLayout) + return filepath.Join(fs.c.DataDirectory, layout, "files_trashbin/versions"), nil +} + +func (fs *ocfs) toDatabasePath(ctx context.Context, ip string) string { + // TODO aduffeck: add support for non-home layout + u := user.ContextMustGetUser(ctx) + layout := templates.WithUser(u, fs.c.UserLayout) + trim := filepath.Join(fs.c.DataDirectory, layout) + p := strings.TrimPrefix(ip, trim) + p = strings.TrimPrefix(p, "/") + return p +} + +func (fs *ocfs) toStoragePath(ctx context.Context, ip string) (sp string) { + if fs.c.EnableHome { + u := user.ContextMustGetUser(ctx) + layout := templates.WithUser(u, fs.c.UserLayout) + trim := filepath.Join(fs.c.DataDirectory, layout, "files") + sp = strings.TrimPrefix(ip, trim) + // root directory + if sp == "" { + sp = "/" + } + } else { + // ip = 
/data//files/foo/bar.txt + // remove data dir + if fs.c.DataDirectory != "/" { + // fs.c.DataDirectory is a clean path, so it never ends in / + ip = strings.TrimPrefix(ip, fs.c.DataDirectory) + // ip = //files/foo/bar.txt + } + + segments := strings.SplitN(ip, "/", 4) + // parts = "", "", "files", "foo/bar.txt" + switch len(segments) { + case 1: + sp = "/" + case 2: + sp = filepath.Join("/", segments[1]) + case 3: + sp = filepath.Join("/", segments[1]) + default: + sp = filepath.Join("/", segments[1], segments[3]) + } + } + log := appctx.GetLogger(ctx) + log.Debug().Str("driver", "ocfs").Str("ipath", ip).Str("spath", sp).Msg("toStoragePath") + return +} + +func (fs *ocfs) toStorageShadowPath(ctx context.Context, ip string) (sp string) { + if fs.c.EnableHome { + u := user.ContextMustGetUser(ctx) + layout := templates.WithUser(u, fs.c.UserLayout) + trim := filepath.Join(fs.c.DataDirectory, layout, "shadow_files") + sp = strings.TrimPrefix(ip, trim) + } else { + // ip = /data//shadow_files/foo/bar.txt + // remove data dir + if fs.c.DataDirectory != "/" { + // fs.c.DataDirectory is a clean path, so it never ends in / + ip = strings.TrimPrefix(ip, fs.c.DataDirectory) + // ip = //shadow_files/foo/bar.txt + } + + segments := strings.SplitN(ip, "/", 4) + // parts = "", "", "shadow_files", "foo/bar.txt" + switch len(segments) { + case 1: + sp = "/" + case 2: + sp = filepath.Join("/", segments[1]) + case 3: + sp = filepath.Join("/", segments[1]) + default: + sp = filepath.Join("/", segments[1], segments[3]) + } + } + appctx.GetLogger(ctx).Debug().Str("driver", "ocfs").Str("ipath", ip).Str("spath", sp).Msg("toStorageShadowPath") + return +} + +// TODO the owner needs to come from a different place +func (fs *ocfs) getOwner(ip string) string { + ip = strings.TrimPrefix(ip, fs.c.DataDirectory) + parts := strings.SplitN(ip, "/", 3) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// TODO cache user lookup +func (fs *ocfs) getUser(ctx context.Context, usernameOrID 
string) (id *userpb.User, err error) { + u := user.ContextMustGetUser(ctx) + // check if username matches and id is set + if u.Username == usernameOrID && u.Id != nil && u.Id.OpaqueId != "" { + return u, nil + } + // check if userid matches and username is set + if u.Id != nil && u.Id.OpaqueId == usernameOrID && u.Username != "" { + return u, nil + } + // look up at the userprovider + + // parts[0] contains the username or userid. use user service to look up id + c, err := pool.GetUserProviderServiceClient(fs.c.UserProviderEndpoint) + if err != nil { + appctx.GetLogger(ctx). + Error().Err(err). + Str("userprovidersvc", fs.c.UserProviderEndpoint). + Str("usernameOrID", usernameOrID). + Msg("could not get user provider client") + return nil, err + } + res, err := c.GetUser(ctx, &userpb.GetUserRequest{ + UserId: &userpb.UserId{OpaqueId: usernameOrID}, + }) + if err != nil { + appctx.GetLogger(ctx). + Error().Err(err). + Str("userprovidersvc", fs.c.UserProviderEndpoint). + Str("usernameOrID", usernameOrID). + Msg("could not get user") + return nil, err + } + + if res.Status.Code == rpc.Code_CODE_NOT_FOUND { + appctx.GetLogger(ctx). + Error(). + Str("userprovidersvc", fs.c.UserProviderEndpoint). + Str("usernameOrID", usernameOrID). + Interface("status", res.Status). + Msg("user not found") + return nil, fmt.Errorf("user not found") + } + + if res.Status.Code != rpc.Code_CODE_OK { + appctx.GetLogger(ctx). + Error(). + Str("userprovidersvc", fs.c.UserProviderEndpoint). + Str("usernameOrID", usernameOrID). + Interface("status", res.Status). 
+ Msg("user lookup failed") + return nil, fmt.Errorf("user lookup failed") + } + return res.User, nil +} + +// permissionSet returns the permission set for the current user +func (fs *ocfs) permissionSet(ctx context.Context, owner *userpb.UserId) *provider.ResourcePermissions { + if owner == nil { + return &provider.ResourcePermissions{ + Stat: true, + } + } + u, ok := user.ContextGetUser(ctx) + if !ok { + return &provider.ResourcePermissions{ + // no permissions + } + } + if u.Id == nil { + return &provider.ResourcePermissions{ + // no permissions + } + } + if u.Id.OpaqueId == owner.OpaqueId && u.Id.Idp == owner.Idp { + return &provider.ResourcePermissions{ + // owner has all permissions + AddGrant: true, + CreateContainer: true, + Delete: true, + GetPath: true, + GetQuota: true, + InitiateFileDownload: true, + InitiateFileUpload: true, + ListContainer: true, + ListFileVersions: true, + ListGrants: true, + ListRecycle: true, + Move: true, + PurgeRecycle: true, + RemoveGrant: true, + RestoreFileVersion: true, + RestoreRecycleItem: true, + Stat: true, + UpdateGrant: true, + } + } + // TODO fix permissions for share recipients by traversing reading acls up to the root? 
cache acls for the parent node and reuse it + return &provider.ResourcePermissions{ + AddGrant: true, + CreateContainer: true, + Delete: true, + GetPath: true, + GetQuota: true, + InitiateFileDownload: true, + InitiateFileUpload: true, + ListContainer: true, + ListFileVersions: true, + ListGrants: true, + ListRecycle: true, + Move: true, + PurgeRecycle: true, + RemoveGrant: true, + RestoreFileVersion: true, + RestoreRecycleItem: true, + Stat: true, + UpdateGrant: true, + } +} + +func (fs *ocfs) getUserStorage(ctx context.Context) (int, error) { + user, ok := user.ContextGetUser(ctx) + if !ok { + return -1, fmt.Errorf("Could not get user for context") + } + return fs.filecache.GetNumericStorageID("home::" + user.Username) +} + +func (fs *ocfs) convertToResourceInfo(ctx context.Context, fi os.FileInfo, ip string, sp string, mdKeys []string) (*provider.ResourceInfo, error) { + storage, err := fs.getUserStorage(ctx) + if err != nil { + return nil, err + } + + p := fs.toDatabasePath(ctx, ip) + cacheEntry, err := fs.filecache.Get(storage, p) + if err != nil { + return nil, err + } + + mdKeysMap := make(map[string]struct{}) + for _, k := range mdKeys { + mdKeysMap[k] = struct{}{} + } + + var returnAllKeys bool + if _, ok := mdKeysMap["*"]; len(mdKeys) == 0 || ok { + returnAllKeys = true + } + + ri := &provider.ResourceInfo{ + Id: &provider.ResourceId{OpaqueId: strconv.Itoa(cacheEntry.ID)}, + Path: sp, + Type: getResourceType(fi.IsDir()), + Etag: cacheEntry.Etag, + MimeType: mime.Detect(fi.IsDir(), ip), + Size: uint64(fi.Size()), + Mtime: &types.Timestamp{ + Seconds: uint64(fi.ModTime().Unix()), + // TODO read nanos from where? Nanos: fi.MTimeNanos, + }, + ArbitraryMetadata: &provider.ArbitraryMetadata{ + Metadata: map[string]string{}, // TODO aduffeck: which metadata needs to go in here? 
+ }, + } + + if owner, err := fs.getUser(ctx, fs.getOwner(ip)); err == nil { + ri.Owner = owner.Id + } else { + appctx.GetLogger(ctx).Error().Err(err).Msg("error getting owner") + } + + ri.PermissionSet = fs.permissionSet(ctx, ri.Owner) + + // checksums + if !fi.IsDir() { + if _, checksumRequested := mdKeysMap[checksumsKey]; returnAllKeys || checksumRequested { + // TODO which checksum was requested? sha1 adler32 or md5? for now hardcode sha1? + readChecksumIntoResourceChecksum(ctx, cacheEntry.Checksum, storageprovider.XSSHA1, ri) + readChecksumIntoOpaque(ctx, cacheEntry.Checksum, storageprovider.XSMD5, ri) + readChecksumIntoOpaque(ctx, ip, storageprovider.XSAdler32, ri) + } + } + + return ri, nil +} +func getResourceType(isDir bool) provider.ResourceType { + if isDir { + return provider.ResourceType_RESOURCE_TYPE_CONTAINER + } + return provider.ResourceType_RESOURCE_TYPE_FILE +} + +// GetPathByID returns the storage relative path for the file id, without the internal namespace +func (fs *ocfs) GetPathByID(ctx context.Context, id *provider.ResourceId) (string, error) { + ip, err := fs.filecache.Path(id.OpaqueId) + if err != nil { + return "", err + } + + // check permissions + if perm, err := fs.readPermissions(ctx, ip); err == nil { + if !perm.GetPath { + return "", errtypes.PermissionDenied("") + } + } else { + if isNotFound(err) { + return "", errtypes.NotFound(fs.toStoragePath(ctx, ip)) + } + return "", errors.Wrap(err, "ocfs: error reading permissions") + } + + return fs.toStoragePath(ctx, ip), nil +} + +// resolve takes in a request path or request id and converts it to an internal path. 
+func (fs *ocfs) resolve(ctx context.Context, ref *provider.Reference) (string, error) { + if ref.GetPath() != "" { + return fs.toInternalPath(ctx, ref.GetPath()), nil + } + + if ref.GetId() != nil { + p, err := fs.filecache.Path(ref.GetId().OpaqueId) + if err != nil { + return "", err + } + p = strings.TrimPrefix(p, "files/") + if !fs.c.EnableHome { + u, ok := user.ContextGetUser(ctx) + if !ok { + return "", fmt.Errorf("could not infer user from context") + } + p = filepath.Join(u.Username, p) + } + + return fs.toInternalPath(ctx, p), nil + } + + // reference is invalid + return "", fmt.Errorf("invalid reference %+v", ref) +} + +func (fs *ocfs) AddGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error { + ip, err := fs.resolve(ctx, ref) + if err != nil { + return errors.Wrap(err, "ocfs: error resolving reference") + } + + // check permissions + if perm, err := fs.readPermissions(ctx, ip); err == nil { + if !perm.AddGrant { + return errtypes.PermissionDenied("") + } + } else { + if isNotFound(err) { + return errtypes.NotFound(fs.toStoragePath(ctx, ip)) + } + return errors.Wrap(err, "ocfs: error reading permissions") + } + + e := ace.FromGrant(g) + principal, value := e.Marshal() + if err := xattr.Set(ip, sharePrefix+principal, value); err != nil { + return err + } + return fs.propagate(ctx, ip) +} + +// extractACEsFromAttrs reads ACEs in the list of attrs from the file +func extractACEsFromAttrs(ctx context.Context, ip string, attrs []string) (entries []*ace.ACE) { + log := appctx.GetLogger(ctx) + entries = []*ace.ACE{} + for i := range attrs { + if strings.HasPrefix(attrs[i], sharePrefix) { + var value []byte + var err error + if value, err = xattr.Get(ip, attrs[i]); err != nil { + log.Error().Err(err).Str("attr", attrs[i]).Msg("could not read attribute") + continue + } + var e *ace.ACE + principal := attrs[i][len(sharePrefix):] + if e, err = ace.Unmarshal(principal, value); err != nil { + log.Error().Err(err).Str("principal", 
principal).Str("attr", attrs[i]).Msg("could not unmarshal ace") + continue + } + entries = append(entries, e) + } + } + return +} + +func (fs *ocfs) readPermissions(ctx context.Context, ip string) (p *provider.ResourcePermissions, err error) { + u, ok := user.ContextGetUser(ctx) + if !ok { + appctx.GetLogger(ctx).Debug().Str("ipath", ip).Msg("no user in context, returning default permissions") + return defaultPermissions, nil + } + // check if the current user is the owner + if fs.getOwner(ip) == u.Username { + appctx.GetLogger(ctx).Debug().Str("ipath", ip).Msg("user is owner, returning owner permissions") + return ownerPermissions, nil + } + + storageID, err := fs.getUserStorage(ctx) + if err != nil { + return nil, err + } + return fs.filecache.Permissions(storageID, fs.toDatabasePath(ctx, ip)) +} + +// The os not exists error is buried inside the xattr error, +// so we cannot just use os.IsNotExists(). +func isNotFound(err error) bool { + if xerr, ok := err.(*xattr.Error); ok { + if serr, ok2 := xerr.Err.(syscall.Errno); ok2 { + return serr == syscall.ENOENT + } + } + return false +} + +func (fs *ocfs) ListGrants(ctx context.Context, ref *provider.Reference) (grants []*provider.Grant, err error) { + log := appctx.GetLogger(ctx) + var ip string + if ip, err = fs.resolve(ctx, ref); err != nil { + return nil, errors.Wrap(err, "ocfs: error resolving reference") + } + + // check permissions + if perm, err := fs.readPermissions(ctx, ip); err == nil { + if !perm.ListGrants { + return nil, errtypes.PermissionDenied("") + } + } else { + if isNotFound(err) { + return nil, errtypes.NotFound(fs.toStoragePath(ctx, ip)) + } + return nil, errors.Wrap(err, "ocfs: error reading permissions") + } + + var attrs []string + if attrs, err = xattr.List(ip); err != nil { + // TODO err might be a not exists + log.Error().Err(err).Msg("error listing attributes") + return nil, err + } + + log.Debug().Interface("attrs", attrs).Msg("read attributes") + + aces := extractACEsFromAttrs(ctx, ip, 
attrs) + + grants = make([]*provider.Grant, 0, len(aces)) + for i := range aces { + grants = append(grants, aces[i].Grant()) + } + + return grants, nil +} + +func (fs *ocfs) RemoveGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) (err error) { + + var ip string + if ip, err = fs.resolve(ctx, ref); err != nil { + return errors.Wrap(err, "ocfs: error resolving reference") + } + + // check permissions + if perm, err := fs.readPermissions(ctx, ip); err == nil { + if !perm.ListContainer { + return errtypes.PermissionDenied("") + } + } else { + if isNotFound(err) { + return errtypes.NotFound(fs.toStoragePath(ctx, ip)) + } + return errors.Wrap(err, "ocfs: error reading permissions") + } + + var attr string + if g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_GROUP { + attr = sharePrefix + "g:" + g.Grantee.GetGroupId().OpaqueId + } else { + attr = sharePrefix + "u:" + g.Grantee.GetUserId().OpaqueId + } + + if err = xattr.Remove(ip, attr); err != nil { + return + } + + return fs.propagate(ctx, ip) +} + +func (fs *ocfs) UpdateGrant(ctx context.Context, ref *provider.Reference, g *provider.Grant) error { + ip, err := fs.resolve(ctx, ref) + if err != nil { + return errors.Wrap(err, "ocfs: error resolving reference") + } + + // check permissions + if perm, err := fs.readPermissions(ctx, ip); err == nil { + if !perm.UpdateGrant { + return errtypes.PermissionDenied("") + } + } else { + if isNotFound(err) { + return errtypes.NotFound(fs.toStoragePath(ctx, ip)) + } + return errors.Wrap(err, "ocfs: error reading permissions") + } + + e := ace.FromGrant(g) + principal, value := e.Marshal() + if err := xattr.Set(ip, sharePrefix+principal, value); err != nil { + return err + } + return fs.propagate(ctx, ip) +} + +func (fs *ocfs) GetQuota(ctx context.Context) (uint64, uint64, error) { + return 0, 0, nil +} + +func (fs *ocfs) CreateHome(ctx context.Context) error { + u, ok := user.ContextGetUser(ctx) + if !ok { + err := 
errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx") + return err + } + layout := templates.WithUser(u, fs.c.UserLayout) + + homePaths := []string{ + filepath.Join(fs.c.DataDirectory, layout, "files"), + filepath.Join(fs.c.DataDirectory, layout, "files_trashbin"), + filepath.Join(fs.c.DataDirectory, layout, "files_trashbin/files"), + filepath.Join(fs.c.DataDirectory, layout, "files_trashbin/versions"), + filepath.Join(fs.c.DataDirectory, layout, "uploads"), + filepath.Join(fs.c.DataDirectory, layout, "shadow_files"), + } + + storageID, err := fs.getUserStorage(ctx) + if err != nil { + return err + } + for _, v := range homePaths { + if err := os.MkdirAll(v, 0700); err != nil { + return errors.Wrap(err, "ocfs: error creating home path: "+v) + } + + fi, err := os.Stat(v) + if err != nil { + return err + } + data := map[string]interface{}{ + "path": fs.toDatabasePath(ctx, v), + "etag": calcEtag(ctx, fi), + "mimetype": "httpd/unix-directory", + } + _, err = fs.filecache.InsertOrUpdate(storageID, data) + if err != nil { + return err + } + } + + return nil +} + +// If home is enabled, the relative home is always the empty string +func (fs *ocfs) GetHome(ctx context.Context) (string, error) { + if !fs.c.EnableHome { + return "", errtypes.NotSupported("ocfs: get home not supported") + } + return "", nil +} + +func (fs *ocfs) CreateDir(ctx context.Context, sp string) (err error) { + ip := fs.toInternalPath(ctx, sp) + + // check permissions of parent dir + if perm, err := fs.readPermissions(ctx, filepath.Dir(ip)); err == nil { + if !perm.CreateContainer { + return errtypes.PermissionDenied("") + } + } else { + if isNotFound(err) { + return errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) + } + return errors.Wrap(err, "ocfs: error reading permissions") + } + + if err = os.Mkdir(ip, 0700); err != nil { + if os.IsNotExist(err) { + return errtypes.NotFound(sp) + } + // FIXME we also need already exists error, webdav expects 405 
MethodNotAllowed + return errors.Wrap(err, "ocfs: error creating dir "+ip) + } + + fi, err := os.Stat(ip) + if err != nil { + return err + } + mtime := time.Now().Unix() + data := map[string]interface{}{ + "path": fs.toDatabasePath(ctx, ip), + "etag": calcEtag(ctx, fi), + "mimetype": "httpd/unix-directory", + "permissions": 31, // 1: READ, 2: UPDATE, 4: CREATE, 8: DELETE, 16: SHARE + "mtime": mtime, + "storage_mtime": mtime, + } + storageID, err := fs.getUserStorage(ctx) + if err != nil { + return err + } + _, err = fs.filecache.InsertOrUpdate(storageID, data) + if err != nil { + if err != nil { + return err + } + } + + return fs.propagate(ctx, ip) +} + +func (fs *ocfs) isShareFolderChild(sp string) bool { + return strings.HasPrefix(sp, fs.c.ShareFolder) +} + +func (fs *ocfs) isShareFolderRoot(sp string) bool { + return sp == fs.c.ShareFolder +} + +func (fs *ocfs) CreateReference(ctx context.Context, sp string, targetURI *url.URL) error { + if !fs.isShareFolderChild(sp) { + return errtypes.PermissionDenied("ocfs: cannot create references outside the share folder: share_folder=" + "/Shares" + " path=" + sp) + } + + ip := fs.toInternalShadowPath(ctx, sp) + // TODO check permission? + + dir, _ := filepath.Split(ip) + if err := os.MkdirAll(dir, 0700); err != nil { + return errors.Wrapf(err, "ocfs: error creating shadow path %s", dir) + } + + f, err := os.Create(ip) + if err != nil { + return errors.Wrapf(err, "ocfs: error creating shadow file %s", ip) + } + + err = xattr.FSet(f, mdPrefix+"target", []byte(targetURI.String())) + if err != nil { + return errors.Wrapf(err, "ocfs: error setting the target %s on the shadow file %s", targetURI.String(), ip) + } + return nil +} + +func (fs *ocfs) setMtime(ctx context.Context, ip string, mtime string) error { + log := appctx.GetLogger(ctx) + if mt, err := parseMTime(mtime); err == nil { + // updating mtime also updates atime + if err := os.Chtimes(ip, mt, mt); err != nil { + log.Error().Err(err). + Str("ipath", ip). 
+ Time("mtime", mt). + Msg("could not set mtime") + return errors.Wrap(err, "could not set mtime") + } + } else { + log.Error().Err(err). + Str("ipath", ip). + Str("mtime", mtime). + Msg("could not parse mtime") + return errors.Wrap(err, "could not parse mtime") + } + return nil +} +func (fs *ocfs) SetArbitraryMetadata(ctx context.Context, ref *provider.Reference, md *provider.ArbitraryMetadata) (err error) { + log := appctx.GetLogger(ctx) + + var ip string + if ip, err = fs.resolve(ctx, ref); err != nil { + return errors.Wrap(err, "ocfs: error resolving reference") + } + + // check permissions + if perm, err := fs.readPermissions(ctx, ip); err == nil { + if !perm.InitiateFileUpload { // TODO add dedicated permission? + return errtypes.PermissionDenied("") + } + } else { + if isNotFound(err) { + return errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) + } + return errors.Wrap(err, "ocfs: error reading permissions") + } + + var fi os.FileInfo + fi, err = os.Stat(ip) + if err != nil { + if os.IsNotExist(err) { + return errtypes.NotFound(fs.toStoragePath(ctx, ip)) + } + return errors.Wrap(err, "ocfs: error stating "+ip) + } + + errs := []error{} + + if md.Metadata != nil { + if val, ok := md.Metadata["mtime"]; ok { + err := fs.setMtime(ctx, ip, val) + if err != nil { + errs = append(errs, errors.Wrap(err, "could not set mtime")) + } + // remove from metadata + delete(md.Metadata, "mtime") + } + // TODO(jfd) special handling for atime? + // TODO(jfd) allow setting birth time (btime)? + // TODO(jfd) any other metadata that is interesting? fileid? + if val, ok := md.Metadata["etag"]; ok { + etag := calcEtag(ctx, fi) + val = fmt.Sprintf("\"%s\"", strings.Trim(val, "\"")) + if etag == val { + log.Debug(). + Str("ipath", ip). + Str("etag", val). 
+ Msg("ignoring request to update identical etag") + } else + // etag is only valid until the calculated etag changes + // TODO(jfd) cleanup in a batch job + if err := xattr.Set(ip, etagPrefix+etag, []byte(val)); err != nil { + log.Error().Err(err). + Str("ipath", ip). + Str("calcetag", etag). + Str("etag", val). + Msg("could not set etag") + errs = append(errs, errors.Wrap(err, "could not set etag")) + } + delete(md.Metadata, "etag") + } + if val, ok := md.Metadata["http://owncloud.org/ns/favorite"]; ok { + // TODO we should not mess with the user here ... the favorites is now a user specific property for a file + // that cannot be mapped to extended attributes without leaking who has marked a file as a favorite + // it is a specific case of a tag, which is user individual as well + // TODO there are different types of tags + // 1. public that are managed by everyone + // 2. private tags that are only visible to the user + // 3. system tags that are only visible to the system + // 4. group tags that are only visible to a group ... + // urgh ... well this can be solved using different namespaces + // 1. public = p: + // 2. private = u:: for user specific + // 3. system = s: for system + // 4. group = g:: + // 5. app? = a:: for apps? + // obviously this only is secure when the u/s/g/a namespaces are not accessible by users in the filesystem + // public tags can be mapped to extended attributes + if u, ok := user.ContextGetUser(ctx); ok { + // the favorite flag is specific to the user, so we need to incorporate the userid + if uid := u.GetId(); uid != nil { + fa := fmt.Sprintf("%s%s@%s", favPrefix, uid.GetOpaqueId(), uid.GetIdp()) + if err := xattr.Set(ip, fa, []byte(val)); err != nil { + log.Error().Err(err). + Str("ipath", ip). + Interface("user", u). + Str("key", fa). + Msg("could not set favorite flag") + errs = append(errs, errors.Wrap(err, "could not set favorite flag")) + } + } else { + log.Error(). + Str("ipath", ip). + Interface("user", u). 
+ Msg("user has no id") + errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "user has no id")) + } + } else { + log.Error(). + Str("ipath", ip). + Interface("user", u). + Msg("error getting user from ctx") + errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx")) + } + // remove from metadata + delete(md.Metadata, "http://owncloud.org/ns/favorite") + } + } + for k, v := range md.Metadata { + if err := xattr.Set(ip, mdPrefix+k, []byte(v)); err != nil { + log.Error().Err(err). + Str("ipath", ip). + Str("key", k). + Str("val", v). + Msg("could not set metadata") + errs = append(errs, errors.Wrap(err, "could not set metadata")) + } + } + switch len(errs) { + case 0: + return fs.propagate(ctx, ip) + case 1: + return errs[0] + default: + // TODO how to return multiple errors? + return errors.New("multiple errors occurred, see log for details") + } +} + +func parseMTime(v string) (t time.Time, err error) { + p := strings.SplitN(v, ".", 2) + var sec, nsec int64 + if sec, err = strconv.ParseInt(p[0], 10, 64); err == nil { + if len(p) > 1 { + nsec, err = strconv.ParseInt(p[1], 10, 64) + } + } + return time.Unix(sec, nsec), err +} + +func (fs *ocfs) UnsetArbitraryMetadata(ctx context.Context, ref *provider.Reference, keys []string) (err error) { + log := appctx.GetLogger(ctx) + + var ip string + if ip, err = fs.resolve(ctx, ref); err != nil { + return errors.Wrap(err, "ocfs: error resolving reference") + } + + // check permissions + if perm, err := fs.readPermissions(ctx, ip); err == nil { + if !perm.InitiateFileUpload { // TODO add dedicated permission? 
+ return errtypes.PermissionDenied("") + } + } else { + if isNotFound(err) { + return errtypes.NotFound(fs.toStoragePath(ctx, ip)) + } + return errors.Wrap(err, "ocfs: error reading permissions") + } + + _, err = os.Stat(ip) + if err != nil { + if os.IsNotExist(err) { + return errtypes.NotFound(fs.toStoragePath(ctx, ip)) + } + return errors.Wrap(err, "ocfs: error stating "+ip) + } + + errs := []error{} + for _, k := range keys { + switch k { + case "http://owncloud.org/ns/favorite": + if u, ok := user.ContextGetUser(ctx); ok { + // the favorite flag is specific to the user, so we need to incorporate the userid + if uid := u.GetId(); uid != nil { + fa := fmt.Sprintf("%s%s@%s", favPrefix, uid.GetOpaqueId(), uid.GetIdp()) + if err := xattr.Remove(ip, fa); err != nil { + log.Error().Err(err). + Str("ipath", ip). + Interface("user", u). + Str("key", fa). + Msg("could not unset favorite flag") + errs = append(errs, errors.Wrap(err, "could not unset favorite flag")) + } + } else { + log.Error(). + Str("ipath", ip). + Interface("user", u). + Msg("user has no id") + errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "user has no id")) + } + } else { + log.Error(). + Str("ipath", ip). + Interface("user", u). + Msg("error getting user from ctx") + errs = append(errs, errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx")) + } + default: + if err = xattr.Remove(ip, mdPrefix+k); err != nil { + // a non-existing attribute will return an error, which we can ignore + // (using string compare because the error type is syscall.Errno and not wrapped/recognizable) + if e, ok := err.(*xattr.Error); !ok || !(e.Err.Error() == "no data available" || + // darwin + e.Err.Error() == "attribute not found") { + log.Error().Err(err). + Str("ipath", ip). + Str("key", k). 
+ Msg("could not unset metadata") + errs = append(errs, errors.Wrap(err, "could not unset metadata")) + } + } + } + } + + switch len(errs) { + case 0: + return fs.propagate(ctx, ip) + case 1: + return errs[0] + default: + // TODO how to return multiple errors? + return errors.New("multiple errors occurred, see log for details") + } +} + +// Delete is actually only a move to trash +// +// This is a first optimistic approach. +// When a file has versions and we want to delete the file it could happen that +// the service crashes before all moves are finished. +// That would result in invalid state like the main files was moved but the +// versions were not. +// We will live with that compromise since this storage driver will be +// deprecated soon. +func (fs *ocfs) Delete(ctx context.Context, ref *provider.Reference) (err error) { + var ip string + if ip, err = fs.resolve(ctx, ref); err != nil { + return errors.Wrap(err, "ocfs: error resolving reference") + } + + // check permissions + if perm, err := fs.readPermissions(ctx, ip); err == nil { + if !perm.Delete { + return errtypes.PermissionDenied("") + } + } else { + if isNotFound(err) { + return errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) + } + return errors.Wrap(err, "ocfs: error reading permissions") + } + + _, err = os.Stat(ip) + if err != nil { + if os.IsNotExist(err) { + return errtypes.NotFound(fs.toStoragePath(ctx, ip)) + } + return errors.Wrap(err, "ocfs: error stating "+ip) + } + + rp, err := fs.getRecyclePath(ctx) + if err != nil { + return errors.Wrap(err, "ocfs: error resolving recycle path") + } + + if err := os.MkdirAll(rp, 0700); err != nil { + return errors.Wrap(err, "ocfs: error creating trashbin dir "+rp) + } + + // ip is the path on disk ... 
we need only the path relative to root
+	origin := filepath.Dir(fs.toStoragePath(ctx, ip))
+
+	err = fs.trash(ctx, ip, rp, origin)
+	if err != nil {
+		return errors.Wrapf(err, "ocfs: error deleting file %s", ip)
+	}
+	err = fs.trashVersions(ctx, ip, origin)
+	if err != nil {
+		return errors.Wrapf(err, "ocfs: error deleting versions of file %s", ip)
+	}
+	return nil
+}
+
+// trash moves the item at ip into the trashbin directory rp, appending the
+// deletion timestamp to the name, and replaces the filecache entry with a
+// trash entry.
+func (fs *ocfs) trash(ctx context.Context, ip string, rp string, origin string) error {
+
+	// move to trash location
+	dtime := time.Now().Unix()
+	tgt := filepath.Join(rp, fmt.Sprintf("%s.d%d", filepath.Base(ip), dtime))
+	if err := os.Rename(ip, tgt); err != nil {
+		if !os.IsExist(err) {
+			// a real rename failure, not a timestamp collision: do not swallow it
+			return errors.Wrap(err, "ocfs: could not move item to trash")
+		}
+		// timestamp collision, try again with higher value:
+		dtime++
+		// assign (do not shadow with :=) so the filecache call below records
+		// the path the item actually ended up at
+		tgt = filepath.Join(rp, fmt.Sprintf("%s.d%d", filepath.Base(ip), dtime))
+		if err := os.Rename(ip, tgt); err != nil {
+			return errors.Wrap(err, "ocfs: could not move item to trash")
+		}
+	}
+
+	storage, err := fs.getUserStorage(ctx)
+	if err != nil {
+		return err
+	}
+	err = fs.filecache.Delete(storage, user.ContextMustGetUser(ctx).Username, fs.toDatabasePath(ctx, ip), fs.toDatabasePath(ctx, tgt))
+	if err != nil {
+		return err
+	}
+
+	return fs.propagate(ctx, filepath.Dir(ip))
+}
+
+func (fs *ocfs) trashVersions(ctx context.Context, ip string, origin string) error {
+	vp := fs.getVersionsPath(ctx, ip)
+	vrp, err := fs.getVersionRecyclePath(ctx)
+	if err != nil {
+		return errors.Wrap(err, "error resolving versions recycle path")
+	}
+
+	if err := os.MkdirAll(vrp, 0700); err != nil {
+		return errors.Wrap(err, "ocfs: error creating trashbin dir "+vrp)
+	}
+
+	// Ignore error since the only possible error is malformed pattern.
+ versions, _ := filepath.Glob(vp + ".v*") + for _, v := range versions { + err := fs.trash(ctx, v, vrp, origin) + if err != nil { + return errors.Wrap(err, "ocfs: error deleting file "+v) + } + } + return nil +} + +func (fs *ocfs) Move(ctx context.Context, oldRef, newRef *provider.Reference) (err error) { + var oldIP string + if oldIP, err = fs.resolve(ctx, oldRef); err != nil { + return errors.Wrap(err, "ocfs: error resolving reference") + } + + // check permissions + if perm, err := fs.readPermissions(ctx, oldIP); err == nil { + if !perm.Move { // TODO add dedicated permission? + return errtypes.PermissionDenied("") + } + } else { + if isNotFound(err) { + return errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(oldIP))) + } + return errors.Wrap(err, "ocfs: error reading permissions") + } + + var newIP string + if newIP, err = fs.resolve(ctx, newRef); err != nil { + return errors.Wrap(err, "ocfs: error resolving reference") + } + + // TODO check target permissions ... if it exists + storage, err := fs.getUserStorage(ctx) + if err != nil { + return err + } + err = fs.filecache.Move(storage, fs.toDatabasePath(ctx, oldIP), fs.toDatabasePath(ctx, newIP)) + if err != nil { + return err + } + if err = os.Rename(oldIP, newIP); err != nil { + return errors.Wrap(err, "ocfs: error moving "+oldIP+" to "+newIP) + } + + if err := fs.propagate(ctx, newIP); err != nil { + return err + } + if err := fs.propagate(ctx, filepath.Dir(oldIP)); err != nil { + return err + } + return nil +} + +func (fs *ocfs) GetMD(ctx context.Context, ref *provider.Reference, mdKeys []string) (*provider.ResourceInfo, error) { + ip, err := fs.resolve(ctx, ref) + if err != nil { + // TODO return correct errtype + if _, ok := err.(errtypes.IsNotFound); ok { + return nil, err + } + return nil, errors.Wrap(err, "ocfs: error resolving reference") + } + p := fs.toStoragePath(ctx, ip) + + if fs.c.EnableHome { + if fs.isShareFolderChild(p) { + return fs.getMDShareFolder(ctx, p, mdKeys) + } + } + + // If 
GetMD is called for a path shared with the user then the path is + // already wrapped. (fs.resolve wraps the path) + if strings.HasPrefix(p, fs.c.DataDirectory) { + ip = p + } + + md, err := os.Stat(ip) + if err != nil { + if os.IsNotExist(err) { + return nil, errtypes.NotFound(fs.toStoragePath(ctx, ip)) + } + return nil, errors.Wrap(err, "ocfs: error stating "+ip) + } + + // check permissions + if perm, err := fs.readPermissions(ctx, ip); err == nil { + if !perm.Stat { + return nil, errtypes.PermissionDenied("") + } + } else { + if isNotFound(err) { + return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) + } + return nil, errors.Wrap(err, "ocfs: error reading permissions") + } + + return fs.convertToResourceInfo(ctx, md, ip, fs.toStoragePath(ctx, ip), mdKeys) +} + +func (fs *ocfs) getMDShareFolder(ctx context.Context, sp string, mdKeys []string) (*provider.ResourceInfo, error) { + ip := fs.toInternalShadowPath(ctx, sp) + + // check permissions + if perm, err := fs.readPermissions(ctx, ip); err == nil { + if !perm.Stat { + return nil, errtypes.PermissionDenied("") + } + } else { + if isNotFound(err) { + return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) + } + return nil, errors.Wrap(err, "ocfs: error reading permissions") + } + + md, err := os.Stat(ip) + if err != nil { + if os.IsNotExist(err) { + return nil, errtypes.NotFound(fs.toStorageShadowPath(ctx, ip)) + } + return nil, errors.Wrapf(err, "ocfs: error stating %s", ip) + } + m, err := fs.convertToResourceInfo(ctx, md, ip, fs.toStorageShadowPath(ctx, ip), mdKeys) + if err != nil { + return nil, err + } + + if !fs.isShareFolderRoot(sp) { + m.Type = provider.ResourceType_RESOURCE_TYPE_REFERENCE + ref, err := xattr.Get(ip, mdPrefix+"target") + if err != nil { + if isNotFound(err) { + return nil, errtypes.NotFound(fs.toStorageShadowPath(ctx, ip)) + } + return nil, err + } + m.Target = string(ref) + } + + return m, nil +} + +func (fs *ocfs) ListFolder(ctx context.Context, ref 
*provider.Reference, mdKeys []string) ([]*provider.ResourceInfo, error) { + log := appctx.GetLogger(ctx) + + ip, err := fs.resolve(ctx, ref) + if err != nil { + return nil, errors.Wrap(err, "ocfs: error resolving reference") + } + sp := fs.toStoragePath(ctx, ip) + + if fs.c.EnableHome { + log.Debug().Msg("home enabled") + if strings.HasPrefix(sp, "/") { + // permissions checked in listWithHome + return fs.listWithHome(ctx, "/", sp, mdKeys) + } + } + + log.Debug().Msg("list with nominal home") + // permissions checked in listWithNominalHome + return fs.listWithNominalHome(ctx, sp, mdKeys) +} + +func (fs *ocfs) listWithNominalHome(ctx context.Context, ip string, mdKeys []string) ([]*provider.ResourceInfo, error) { + + // If a user wants to list a folder shared with him the path will already + // be wrapped with the files directory path of the share owner. + // In that case we don't want to wrap the path again. + if !strings.HasPrefix(ip, fs.c.DataDirectory) { + ip = fs.toInternalPath(ctx, ip) + } + + // check permissions + if perm, err := fs.readPermissions(ctx, ip); err == nil { + if !perm.ListContainer { + return nil, errtypes.PermissionDenied("") + } + } else { + if isNotFound(err) { + return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) + } + return nil, errors.Wrap(err, "ocfs: error reading permissions") + } + + mds, err := ioutil.ReadDir(ip) + if err != nil { + return nil, errors.Wrapf(err, "ocfs: error listing %s", ip) + } + finfos := []*provider.ResourceInfo{} + for _, md := range mds { + cp := filepath.Join(ip, md.Name()) + m, err := fs.convertToResourceInfo(ctx, md, cp, fs.toStoragePath(ctx, cp), mdKeys) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("path", cp).Msg("error converting to a resource info") + } + finfos = append(finfos, m) + } + return finfos, nil +} + +func (fs *ocfs) listWithHome(ctx context.Context, home, p string, mdKeys []string) ([]*provider.ResourceInfo, error) { + log := appctx.GetLogger(ctx) + if p == 
home { + log.Debug().Msg("listing home") + return fs.listHome(ctx, home, mdKeys) + } + + if fs.isShareFolderRoot(p) { + log.Debug().Msg("listing share folder root") + return fs.listShareFolderRoot(ctx, p, mdKeys) + } + + if fs.isShareFolderChild(p) { + return nil, errtypes.PermissionDenied("ocfs: error listing folders inside the shared folder, only file references are stored inside") + } + + log.Debug().Msg("listing nominal home") + return fs.listWithNominalHome(ctx, p, mdKeys) +} + +func (fs *ocfs) listHome(ctx context.Context, home string, mdKeys []string) ([]*provider.ResourceInfo, error) { + // list files + ip := fs.toInternalPath(ctx, home) + + // check permissions + if perm, err := fs.readPermissions(ctx, ip); err == nil { + if !perm.ListContainer { + return nil, errtypes.PermissionDenied("") + } + } else { + if isNotFound(err) { + return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) + } + return nil, errors.Wrap(err, "ocfs: error reading permissions") + } + + mds, err := ioutil.ReadDir(ip) + if err != nil { + return nil, errors.Wrap(err, "ocfs: error listing files") + } + + finfos := []*provider.ResourceInfo{} + for _, md := range mds { + cp := filepath.Join(ip, md.Name()) + m, err := fs.convertToResourceInfo(ctx, md, cp, fs.toStoragePath(ctx, cp), mdKeys) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("path", cp).Msg("error converting to a resource info") + } + finfos = append(finfos, m) + } + + // list shadow_files + ip = fs.toInternalShadowPath(ctx, home) + mds, err = ioutil.ReadDir(ip) + if err != nil { + return nil, errors.Wrap(err, "ocfs: error listing shadow_files") + } + for _, md := range mds { + cp := filepath.Join(ip, md.Name()) + m, err := fs.convertToResourceInfo(ctx, md, cp, fs.toStorageShadowPath(ctx, cp), mdKeys) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("path", cp).Msg("error converting to a resource info") + } + finfos = append(finfos, m) + } + return finfos, nil +} + +func (fs *ocfs) 
listShareFolderRoot(ctx context.Context, sp string, mdKeys []string) ([]*provider.ResourceInfo, error) { + ip := fs.toInternalShadowPath(ctx, sp) + + // check permissions + if perm, err := fs.readPermissions(ctx, ip); err == nil { + if !perm.ListContainer { + return nil, errtypes.PermissionDenied("") + } + } else { + if isNotFound(err) { + return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) + } + return nil, errors.Wrap(err, "ocfs: error reading permissions") + } + + mds, err := ioutil.ReadDir(ip) + if err != nil { + if os.IsNotExist(err) { + return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) + } + return nil, errors.Wrap(err, "ocfs: error listing shadow_files") + } + + finfos := []*provider.ResourceInfo{} + for _, md := range mds { + cp := filepath.Join(ip, md.Name()) + m, err := fs.convertToResourceInfo(ctx, md, cp, fs.toStorageShadowPath(ctx, cp), mdKeys) + if err != nil { + appctx.GetLogger(ctx).Error().Err(err).Str("path", cp).Msg("error converting to a resource info") + } + m.Type = provider.ResourceType_RESOURCE_TYPE_REFERENCE + ref, err := xattr.Get(cp, mdPrefix+"target") + if err != nil { + return nil, err + } + m.Target = string(ref) + finfos = append(finfos, m) + } + + return finfos, nil +} + +func (fs *ocfs) archiveRevision(ctx context.Context, vbp string, ip string) error { + // move existing file to versions dir + vp := fmt.Sprintf("%s.v%d", vbp, time.Now().Unix()) + if err := os.MkdirAll(filepath.Dir(vp), 0700); err != nil { + return errors.Wrap(err, "ocfs: error creating versions dir "+vp) + } + + // TODO(jfd): make sure rename is atomic, missing fsync ... 
+ if err := os.Rename(ip, vp); err != nil { + return errors.Wrap(err, "ocfs: error renaming from "+ip+" to "+vp) + } + + storage, err := fs.getUserStorage(ctx) + if err != nil { + return err + } + + vdp := fs.toDatabasePath(ctx, vp) + basePath := strings.TrimSuffix(vp, vdp) + parts := strings.Split(filepath.Dir(vdp), "/") + walkPath := "" + for i := 0; i < len(parts); i++ { + walkPath = filepath.Join(walkPath, parts[i]) + _, err := fs.filecache.Get(storage, walkPath) + if err == nil { + continue + } + + fi, err := os.Stat(filepath.Join(basePath, walkPath)) + if err != nil { + return errors.Wrap(err, "could not stat parent version directory") + } + data := map[string]interface{}{ + "path": walkPath, + "mimetype": "httpd/unix-directory", + "etag": calcEtag(ctx, fi), + "permissions": 31, // 1: READ, 2: UPDATE, 4: CREATE, 8: DELETE, 16: SHARE + } + + _, err = fs.filecache.InsertOrUpdate(storage, data) + if err != nil { + return errors.Wrap(err, "could not create parent version directory") + } + } + _, err = fs.filecache.Copy(storage, fs.toDatabasePath(ctx, ip), vdp) + return err +} + +func (fs *ocfs) copyMD(s string, t string) (err error) { + var attrs []string + if attrs, err = xattr.List(s); err != nil { + return err + } + for i := range attrs { + if strings.HasPrefix(attrs[i], ocPrefix) { + var d []byte + if d, err = xattr.Get(s, attrs[i]); err != nil { + return err + } + if err = xattr.Set(t, attrs[i], d); err != nil { + return err + } + } + } + return nil +} + +func (fs *ocfs) Download(ctx context.Context, ref *provider.Reference) (io.ReadCloser, error) { + ip, err := fs.resolve(ctx, ref) + if err != nil { + return nil, errors.Wrap(err, "ocfs: error resolving reference") + } + + // check permissions + if perm, err := fs.readPermissions(ctx, ip); err == nil { + if !perm.InitiateFileDownload { + return nil, errtypes.PermissionDenied("") + } + } else { + if isNotFound(err) { + return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) + } + return nil, 
errors.Wrap(err, "ocfs: error reading permissions") + } + + r, err := os.Open(ip) + if err != nil { + if os.IsNotExist(err) { + return nil, errtypes.NotFound(fs.toStoragePath(ctx, ip)) + } + return nil, errors.Wrap(err, "ocfs: error reading "+ip) + } + return r, nil +} + +func (fs *ocfs) ListRevisions(ctx context.Context, ref *provider.Reference) ([]*provider.FileVersion, error) { + ip, err := fs.resolve(ctx, ref) + if err != nil { + return nil, errors.Wrap(err, "ocfs: error resolving reference") + } + + // check permissions + if perm, err := fs.readPermissions(ctx, ip); err == nil { + if !perm.ListFileVersions { + return nil, errtypes.PermissionDenied("") + } + } else { + if isNotFound(err) { + return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) + } + return nil, errors.Wrap(err, "ocfs: error reading permissions") + } + + vp := fs.getVersionsPath(ctx, ip) + bn := filepath.Base(ip) + + revisions := []*provider.FileVersion{} + mds, err := ioutil.ReadDir(filepath.Dir(vp)) + if err != nil { + return nil, errors.Wrap(err, "ocfs: error reading"+filepath.Dir(vp)) + } + for i := range mds { + rev := fs.filterAsRevision(ctx, bn, mds[i]) + if rev != nil { + revisions = append(revisions, rev) + } + } + return revisions, nil +} + +func (fs *ocfs) filterAsRevision(ctx context.Context, bn string, md os.FileInfo) *provider.FileVersion { + if strings.HasPrefix(md.Name(), bn) { + // versions have filename.ext.v12345678 + version := md.Name()[len(bn)+2:] // truncate ".v" to get version mtime + mtime, err := strconv.Atoi(version) + if err != nil { + log := appctx.GetLogger(ctx) + log.Error().Err(err).Str("path", md.Name()).Msg("invalid version mtime") + return nil + } + // TODO(jfd) trashed versions are in the files_trashbin/versions folder ... 
not relevant here + return &provider.FileVersion{ + Key: version, + Size: uint64(md.Size()), + Mtime: uint64(mtime), + Etag: calcEtag(ctx, md), + } + } + return nil +} + +func (fs *ocfs) DownloadRevision(ctx context.Context, ref *provider.Reference, revisionKey string) (io.ReadCloser, error) { + return nil, errtypes.NotSupported("download revision") +} + +func (fs *ocfs) RestoreRevision(ctx context.Context, ref *provider.Reference, revisionKey string) error { + ip, err := fs.resolve(ctx, ref) + if err != nil { + return errors.Wrap(err, "ocfs: error resolving reference") + } + + // check permissions + if perm, err := fs.readPermissions(ctx, ip); err == nil { + if !perm.RestoreFileVersion { + return errtypes.PermissionDenied("") + } + } else { + if isNotFound(err) { + return errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) + } + return errors.Wrap(err, "ocfs: error reading permissions") + } + + vp := fs.getVersionsPath(ctx, ip) + rp := vp + ".v" + revisionKey + + // check revision exists + rs, err := os.Stat(rp) + if err != nil { + return err + } + + if !rs.Mode().IsRegular() { + return fmt.Errorf("%s is not a regular file", rp) + } + + source, err := os.Open(rp) + if err != nil { + return err + } + defer source.Close() + + // destination should be available, otherwise we could not have navigated to its revisions + if err := fs.archiveRevision(ctx, fs.getVersionsPath(ctx, ip), ip); err != nil { + return err + } + + destination, err := os.Create(ip) + if err != nil { + // TODO(jfd) bring back revision in case sth goes wrong? 
+ return err + } + defer destination.Close() + + _, err = io.Copy(destination, source) + if err != nil { + return err + } + + sha1h, md5h, adler32h, err := fs.HashFile(ip) + if err != nil { + log.Err(err).Msg("owncloudsql: could not open file for checksumming") + } + fi, err := os.Stat(ip) + if err != nil { + return err + } + mtime := time.Now().Unix() + data := map[string]interface{}{ + "path": fs.toDatabasePath(ctx, ip), + "checksum": fmt.Sprintf("SHA1:%032x MD5:%032x ADLER32:%032x", sha1h, md5h, adler32h), + "etag": calcEtag(ctx, fi), + "size": fi.Size(), + "mimetype": mime.Detect(false, ip), + "mtime": mtime, + "storage_mtime": mtime, + } + storageID, err := fs.getUserStorage(ctx) + if err != nil { + return err + } + _, err = fs.filecache.InsertOrUpdate(storageID, data) + if err != nil { + return err + } + + // TODO(jfd) bring back revision in case sth goes wrong? + return fs.propagate(ctx, ip) +} + +func (fs *ocfs) PurgeRecycleItem(ctx context.Context, key string) error { + rp, err := fs.getRecyclePath(ctx) + if err != nil { + return errors.Wrap(err, "ocfs: error resolving recycle path") + } + ip := filepath.Join(rp, filepath.Clean(key)) + // TODO check permission? + + // check permissions + /* are they stored in the trash? 
+ if perm, err := fs.readPermissions(ctx, ip); err == nil { + if !perm.ListContainer { + return nil, errtypes.PermissionDenied("") + } + } else { + if isNotFound(err) { + return nil, errtypes.NotFound(fs.unwrap(ctx, filepath.Dir(ip))) + } + return nil, errors.Wrap(err, "ocfs: error reading permissions") + } + */ + + err = os.RemoveAll(ip) + if err != nil { + return errors.Wrap(err, "ocfs: error deleting recycle item") + } + err = os.RemoveAll(filepath.Join(filepath.Dir(rp), "versions", filepath.Clean(key))) + if err != nil { + return errors.Wrap(err, "ocfs: error deleting recycle item versions") + } + + base, ttime, err := splitTrashKey(key) + if err != nil { + return err + } + err = fs.filecache.PurgeRecycleItem(user.ContextMustGetUser(ctx).Username, base, ttime) + if err != nil { + return err + } + // TODO delete keyfiles, keys, share-keys + return nil +} + +func (fs *ocfs) EmptyRecycle(ctx context.Context) error { + // TODO check permission? on what? user must be the owner + rp, err := fs.getRecyclePath(ctx) + if err != nil { + return errors.Wrap(err, "ocfs: error resolving recycle path") + } + err = os.RemoveAll(rp) + if err != nil { + return errors.Wrap(err, "ocfs: error deleting recycle files") + } + err = os.RemoveAll(filepath.Join(filepath.Dir(rp), "versions")) + if err != nil { + return errors.Wrap(err, "ocfs: error deleting recycle files versions") + } + // TODO delete keyfiles, keys, share-keys ... or just everything? 
+	return nil
+}
+
+// splitTrashKey splits a trashbin key of the form "filename.ext.d12345678"
+// into the original base name ("filename.ext") and the deletion timestamp.
+func splitTrashKey(key string) (string, int, error) {
+	// trashbin items have filename.ext.d12345678
+	suffix := filepath.Ext(key)
+	if len(suffix) == 0 || !strings.HasPrefix(suffix, ".d") {
+		return "", -1, fmt.Errorf("invalid suffix")
+	}
+	trashtime := suffix[2:] // truncate "d" to get trashbin time
+	ttime, err := strconv.Atoi(trashtime)
+	if err != nil {
+		return "", -1, fmt.Errorf("invalid suffix")
+	}
+	return strings.TrimSuffix(filepath.Base(key), suffix), ttime, nil
+}
+
+// convertToRecycleItem converts a trashbin directory entry into a RecycleItem
+// by looking up the original location in the filecache. It returns nil when
+// the entry cannot be converted.
+func (fs *ocfs) convertToRecycleItem(ctx context.Context, md os.FileInfo) *provider.RecycleItem {
+	base, ttime, err := splitTrashKey(md.Name())
+	if err != nil {
+		log := appctx.GetLogger(ctx)
+		log.Error().Err(err).Str("path", md.Name()).Msg("invalid trash item key")
+		// without a valid base name and deletion time we cannot look up the
+		// trash entry, so do not continue with the zero values
+		return nil
+	}
+
+	u := user.ContextMustGetUser(ctx)
+	item, err := fs.filecache.GetRecycleItem(u.Username, base, ttime)
+	if err != nil {
+		log := appctx.GetLogger(ctx)
+		log.Error().Err(err).Str("path", md.Name()).Msg("could not get trash item")
+		return nil
+	}
+
+	// ownCloud 10 stores the parent dir of the deleted item as the location in the oc_files_trashbin table
+	// we use extended attributes for original location, but also only the parent location, which is why
+	// we need to join and trim the path when listing it
+	originalPath := filepath.Join(item.Path, base)
+
+	return &provider.RecycleItem{
+		Type: getResourceType(md.IsDir()),
+		Key:  md.Name(),
+		// TODO do we need to prefix the path? it should be relative to this storage root, right?
+		Path: originalPath,
+		Size: uint64(md.Size()),
+		DeletionTime: &types.Timestamp{
+			Seconds: uint64(ttime),
+			// no nanos available
+		},
+	}
+}
+
+func (fs *ocfs) ListRecycle(ctx context.Context) ([]*provider.RecycleItem, error) {
+	// TODO check permission? on what? user must be the owner?
+ rp, err := fs.getRecyclePath(ctx) + if err != nil { + return nil, errors.Wrap(err, "ocfs: error resolving recycle path") + } + + // list files folder + mds, err := ioutil.ReadDir(rp) + if err != nil { + log := appctx.GetLogger(ctx) + log.Debug().Err(err).Str("path", rp).Msg("trash not readable") + // TODO jfd only ignore not found errors + return []*provider.RecycleItem{}, nil + } + // TODO (jfd) limit and offset + items := []*provider.RecycleItem{} + for i := range mds { + ri := fs.convertToRecycleItem(ctx, mds[i]) + if ri != nil { + items = append(items, ri) + } + + } + return items, nil +} + +func (fs *ocfs) RestoreRecycleItem(ctx context.Context, key, restorePath string) error { + // TODO check permission? on what? user must be the owner? + log := appctx.GetLogger(ctx) + rp, err := fs.getRecyclePath(ctx) + if err != nil { + return errors.Wrap(err, "ocfs: error resolving recycle path") + } + src := filepath.Join(rp, filepath.Clean(key)) + + suffix := filepath.Ext(src) + if len(suffix) == 0 || !strings.HasPrefix(suffix, ".d") { + log.Error().Str("key", key).Str("path", src).Msg("invalid trash item suffix") + return nil + } + + if restorePath == "" { + v, err := xattr.Get(src, trashOriginPrefix) + if err != nil { + log.Error().Err(err).Str("key", key).Str("path", src).Msg("could not read origin") + } + restorePath = filepath.Join("/", filepath.Clean(string(v)), strings.TrimSuffix(filepath.Base(src), suffix)) + } + tgt := fs.toInternalPath(ctx, restorePath) + // move back to original location + if err := os.Rename(src, tgt); err != nil { + log.Error().Err(err).Str("key", key).Str("restorePath", restorePath).Str("src", src).Str("tgt", tgt).Msg("could not restore item") + return errors.Wrap(err, "ocfs: could not restore item") + } + + storage, err := fs.getUserStorage(ctx) + if err != nil { + return err + } + err = fs.filecache.Move(storage, fs.toDatabasePath(ctx, src), fs.toDatabasePath(ctx, tgt)) + if err != nil { + return err + } + + // TODO(jfd) restore 
versions + + return fs.propagate(ctx, tgt) +} + +func (fs *ocfs) propagate(ctx context.Context, leafPath string) error { + var root string + if fs.c.EnableHome { + root = fs.toInternalPath(ctx, "/") + } else { + owner := fs.getOwner(leafPath) + root = fs.toInternalPath(ctx, owner) + } + if !strings.HasPrefix(leafPath, root) { + err := errors.New("internal path outside root") + appctx.GetLogger(ctx).Error(). + Err(err). + Str("leafPath", leafPath). + Str("root", root). + Msg("could not propagate change") + return err + } + + fi, err := os.Stat(leafPath) + if err != nil { + appctx.GetLogger(ctx).Error(). + Err(err). + Str("leafPath", leafPath). + Str("root", root). + Msg("could not propagate change") + return err + } + + storageID, err := fs.getUserStorage(ctx) + if err != nil { + return err + } + parts := strings.Split(strings.TrimPrefix(leafPath, root), "/") + // root never ends in / so the split returns an empty first element, which we can skip + // we do not need to chmod the last element because it is the leaf path (< and not <= comparison) + for i := 1; i < len(parts); i++ { + appctx.GetLogger(ctx).Debug(). + Str("leafPath", leafPath). + Str("root", root). + Int("i", i). + Interface("parts", parts). + Msg("propagating change") + if err := os.Chtimes(filepath.Join(root), fi.ModTime(), fi.ModTime()); err != nil { + appctx.GetLogger(ctx).Error(). + Err(err). + Str("leafPath", leafPath). + Str("root", root). + Msg("could not propagate change") + return err + } + fi, err := os.Stat(filepath.Join(root)) + if err != nil { + return err + } + etag := calcEtag(ctx, fi) + if err := fs.filecache.SetEtag(storageID, fs.toDatabasePath(ctx, root), etag); err != nil { + appctx.GetLogger(ctx).Error(). + Err(err). + Str("leafPath", leafPath). + Str("root", root). 
+				Msg("could not set etag")
+			return err
+		}
+		root = filepath.Join(root, parts[i])
+	}
+	return nil
+}
+
+// HashFile computes the sha1, md5 and adler32 checksums of the file at path
+// in a single pass. The sums are returned as raw (unencoded) byte strings;
+// callers hex-encode them with %x.
+func (fs *ocfs) HashFile(path string) (string, string, string, error) {
+	sha1h := sha1.New()
+	md5h := md5.New()
+	adler32h := adler32.New()
+	{
+		f, err := os.Open(path)
+		if err != nil {
+			return "", "", "", errors.Wrap(err, "owncloudsql: could not open file for checksumming")
+		}
+		defer f.Close()
+
+		r1 := io.TeeReader(f, sha1h)
+		r2 := io.TeeReader(r1, md5h)
+
+		if _, err := io.Copy(adler32h, r2); err != nil {
+			return "", "", "", errors.Wrap(err, "owncloudsql: could not copy bytes for checksumming")
+		}
+
+		return string(sha1h.Sum(nil)), string(md5h.Sum(nil)), string(adler32h.Sum(nil)), nil
+	}
+}
+
+// readChecksumIntoResourceChecksum extracts the checksum for the given algo
+// from an oc_filecache checksums string of the form
+// "SHA1:... MD5:... ADLER32:..." and stores it in ri.Checksum.
+func readChecksumIntoResourceChecksum(ctx context.Context, checksums, algo string, ri *provider.ResourceInfo) {
+	// match up to the next space only: a greedy `(.*)` would also capture the
+	// checksums of the algorithms that follow in the combined string
+	re := regexp.MustCompile(strings.ToUpper(algo) + `:([^ ]*)`)
+	matches := re.FindStringSubmatch(checksums)
+	if len(matches) < 2 {
+		appctx.GetLogger(ctx).
+			Debug().
+			Str("nodepath", checksums).
+			Str("algorithm", algo).
+			Msg("checksum not set")
+	} else {
+		ri.Checksum = &provider.ResourceChecksum{
+			Type: storageprovider.PKG2GRPCXS(algo),
+			Sum:  matches[1],
+		}
+	}
+}
+
+// readChecksumIntoOpaque extracts the checksum for the given algo from the
+// combined checksums string and stores it as a plain opaque entry on ri.
+func readChecksumIntoOpaque(ctx context.Context, checksums, algo string, ri *provider.ResourceInfo) {
+	// match up to the next space only, see readChecksumIntoResourceChecksum
+	re := regexp.MustCompile(strings.ToUpper(algo) + `:([^ ]*)`)
+	matches := re.FindStringSubmatch(checksums)
+	if len(matches) < 2 {
+		appctx.GetLogger(ctx).
+			Debug().
+			Str("nodepath", checksums).
+			Str("algorithm", algo).
+			Msg("checksum not set")
+	} else {
+		if ri.Opaque == nil {
+			ri.Opaque = &types.Opaque{
+				Map: map[string]*types.OpaqueEntry{},
+			}
+		}
+		ri.Opaque.Map[algo] = &types.OpaqueEntry{
+			Decoder: "plain",
+			Value:   []byte(matches[1]),
+		}
+	}
+}
+
+// TODO propagate etag and mtime or append event to history? propagate on disk ...
+// - but propagation is a separate task. only if upload was successful ...
diff --git a/pkg/storage/fs/owncloudsql/owncloudsql_unix.go b/pkg/storage/fs/owncloudsql/owncloudsql_unix.go new file mode 100755 index 0000000000..80006bfaf7 --- /dev/null +++ b/pkg/storage/fs/owncloudsql/owncloudsql_unix.go @@ -0,0 +1,68 @@ +// Copyright 2018-2021 CERN +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// In applying this license, CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +// +build !windows + +package owncloudsql + +import ( + "context" + "crypto/md5" + "encoding/binary" + "fmt" + "os" + "strings" + "syscall" + + "github.com/cs3org/reva/pkg/appctx" +) + +// TODO(jfd) get rid of the differences between unix and windows. the inode and dev should never be used for the etag because it interferes with backups + +// calcEtag will create an etag based on the md5 of +// - mtime, +// - inode (if available), +// - device (if available) and +// - size. 
+// errors are logged, but an etag will still be returned
+func calcEtag(ctx context.Context, fi os.FileInfo) string {
+	log := appctx.GetLogger(ctx)
+	h := md5.New()
+	// hash order matters: mtime, then (inode, device) when available, then size
+	err := binary.Write(h, binary.BigEndian, fi.ModTime().UnixNano())
+	if err != nil {
+		log.Error().Err(err).Msg("error writing mtime")
+	}
+	// Stat_t is only available on unix; on other systems fi.Sys() yields a
+	// different type and the inode/device part is simply skipped
+	stat, ok := fi.Sys().(*syscall.Stat_t)
+	if ok {
+		// take device and inode into account
+		err = binary.Write(h, binary.BigEndian, stat.Ino)
+		if err != nil {
+			log.Error().Err(err).Msg("error writing inode")
+		}
+		err = binary.Write(h, binary.BigEndian, stat.Dev)
+		if err != nil {
+			log.Error().Err(err).Msg("error writing device")
+		}
+	}
+	err = binary.Write(h, binary.BigEndian, fi.Size())
+	if err != nil {
+		log.Error().Err(err).Msg("error writing size")
+	}
+	etag := fmt.Sprintf(`"%x"`, h.Sum(nil))
+	// NOTE(review): etag is already quoted by the Sprintf above; the Trim and
+	// re-quote below are a no-op — presumably kept for symmetry with other
+	// drivers. Verify before simplifying (the strings import is shared).
+	return fmt.Sprintf("\"%s\"", strings.Trim(etag, "\""))
+}
diff --git a/pkg/storage/fs/owncloudsql/owncloudsql_windows.go b/pkg/storage/fs/owncloudsql/owncloudsql_windows.go
new file mode 100644
index 0000000000..847de9a0f0
--- /dev/null
+++ b/pkg/storage/fs/owncloudsql/owncloudsql_windows.go
@@ -0,0 +1,54 @@
+// Copyright 2018-2021 CERN
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// In applying this license, CERN does not waive the privileges and immunities
+// granted to it by virtue of its status as an Intergovernmental Organization
+// or submit itself to any jurisdiction.
+
+// +build windows
+
+package owncloudsql
+
+import (
+	"context"
+	"crypto/md5"
+	"encoding/binary"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/cs3org/reva/pkg/appctx"
+)
+
+// calcEtag will create an etag based on the md5 of
+// - mtime and
+// - size.
+// unlike the unix variant, inode and device are NOT mixed into the
+// hash, because they have no meaning on windows.
+// errors are logged, but an etag will still be returned
+func calcEtag(ctx context.Context, fi os.FileInfo) string {
+	log := appctx.GetLogger(ctx)
+	h := md5.New()
+	err := binary.Write(h, binary.BigEndian, fi.ModTime().UnixNano())
+	if err != nil {
+		log.Error().Err(err).Msg("error writing mtime")
+	}
+	// device and inode have no meaning on windows
+	err = binary.Write(h, binary.BigEndian, fi.Size())
+	if err != nil {
+		log.Error().Err(err).Msg("error writing size")
+	}
+	etag := fmt.Sprintf(`"%x"`, h.Sum(nil))
+	return fmt.Sprintf("\"%s\"", strings.Trim(etag, "\""))
+}
diff --git a/pkg/storage/fs/owncloudsql/upload.go b/pkg/storage/fs/owncloudsql/upload.go
new file mode 100644
index 0000000000..d7cb975abb
--- /dev/null
+++ b/pkg/storage/fs/owncloudsql/upload.go
@@ -0,0 +1,522 @@
+// Copyright 2018-2021 CERN
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// In applying this license, CERN does not waive the privileges and immunities
+// granted to it by virtue of its status as an Intergovernmental Organization
+// or submit itself to any jurisdiction.
+
+package owncloudsql
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+
+	userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
+	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
+	"github.com/cs3org/reva/pkg/appctx"
+	"github.com/cs3org/reva/pkg/errtypes"
+	"github.com/cs3org/reva/pkg/logger"
+	"github.com/cs3org/reva/pkg/mime"
+	"github.com/cs3org/reva/pkg/storage/utils/chunking"
+	"github.com/cs3org/reva/pkg/storage/utils/templates"
+	"github.com/cs3org/reva/pkg/user"
+	"github.com/google/uuid"
+	"github.com/pkg/errors"
+	"github.com/rs/zerolog/log"
+	tusd "github.com/tus/tusd/pkg/handler"
+)
+
+// defaultFilePerm is the mode used when creating upload binary and .info files
+var defaultFilePerm = os.FileMode(0664)
+
+// Upload writes the given stream to the resource identified by ref,
+// transparently handling oc-style chunked uploads.
+func (fs *ocfs) Upload(ctx context.Context, ref *provider.Reference, r io.ReadCloser) error {
+	upload, err := fs.GetUpload(ctx, ref.GetPath())
+	if err != nil {
+		// Upload corresponding to this ID was not found.
+		// Assume that this corresponds to the resource path to which the file has to be uploaded.
+
+		// Set the length to 0 and set SizeIsDeferred to true
+		metadata := map[string]string{"sizedeferred": "true"}
+		uploadIDs, err := fs.InitiateUpload(ctx, ref, 0, metadata)
+		if err != nil {
+			return err
+		}
+		if upload, err = fs.GetUpload(ctx, uploadIDs["simple"]); err != nil {
+			return errors.Wrap(err, "ocfs: error retrieving upload")
+		}
+	}
+
+	uploadInfo := upload.(*fileUpload)
+
+	p := uploadInfo.info.Storage["InternalDestination"]
+	ok, err := chunking.IsChunked(p)
+	if err != nil {
+		return errors.Wrap(err, "ocfs: error checking path")
+	}
+	if ok {
+		var assembledFile string
+		p, assembledFile, err = fs.chunkHandler.WriteChunk(p, r)
+		if err != nil {
+			return err
+		}
+		// an empty path means not all chunks have arrived yet
+		if p == "" {
+			if err = uploadInfo.Terminate(ctx); err != nil {
+				return errors.Wrap(err, "ocfs: error removing auxiliary files")
+			}
+			return errtypes.PartialContent(ref.String())
+		}
+		uploadInfo.info.Storage["InternalDestination"] = p
+		fd, err := os.Open(assembledFile)
+		if err != nil {
+			return errors.Wrap(err, "ocfs: error opening assembled file")
+		}
+		// NOTE(review): defers run LIFO, so RemoveAll deletes the file before
+		// Close runs; that is fine on unix but may fail on windows — confirm
+		defer fd.Close()
+		defer os.RemoveAll(assembledFile)
+		r = fd
+	}
+
+	if _, err := uploadInfo.WriteChunk(ctx, 0, r); err != nil {
+		return errors.Wrap(err, "ocfs: error writing to binary file")
+	}
+
+	return uploadInfo.FinishUpload(ctx)
+}
+
+// InitiateUpload returns upload ids corresponding to different protocols it supports
+// TODO read optional content for small files in this request
+func (fs *ocfs) InitiateUpload(ctx context.Context, ref *provider.Reference, uploadLength int64, metadata map[string]string) (map[string]string, error) {
+	ip, err := fs.resolve(ctx, ref)
+	if err != nil {
+		return nil, errors.Wrap(err, "ocfs: error resolving reference")
+	}
+
+	// permissions are checked in NewUpload below
+
+	p := fs.toStoragePath(ctx, ip)
+
+	info := tusd.FileInfo{
+		MetaData: tusd.MetaData{
+			"filename": filepath.Base(p),
+			"dir":      filepath.Dir(p),
+		},
+		Size: uploadLength,
+	}
+
+	if metadata != nil {
+		if metadata["mtime"] != 
"" { + info.MetaData["mtime"] = metadata["mtime"] + } + if _, ok := metadata["sizedeferred"]; ok { + info.SizeIsDeferred = true + } + } + + upload, err := fs.NewUpload(ctx, info) + if err != nil { + return nil, err + } + + info, _ = upload.GetInfo(ctx) + + return map[string]string{ + "simple": info.ID, + "tus": info.ID, + }, nil +} + +// UseIn tells the tus upload middleware which extensions it supports. +func (fs *ocfs) UseIn(composer *tusd.StoreComposer) { + composer.UseCore(fs) + composer.UseTerminater(fs) + composer.UseConcater(fs) + composer.UseLengthDeferrer(fs) +} + +// To implement the core tus.io protocol as specified in https://tus.io/protocols/resumable-upload.html#core-protocol +// - the storage needs to implement NewUpload and GetUpload +// - the upload needs to implement the tusd.Upload interface: WriteChunk, GetInfo, GetReader and FinishUpload + +func (fs *ocfs) NewUpload(ctx context.Context, info tusd.FileInfo) (upload tusd.Upload, err error) { + + log := appctx.GetLogger(ctx) + log.Debug().Interface("info", info).Msg("ocfs: NewUpload") + + if info.MetaData["filename"] == "" { + return nil, errors.New("ocfs: missing filename in metadata") + } + info.MetaData["filename"] = filepath.Clean(info.MetaData["filename"]) + + dir := info.MetaData["dir"] + if dir == "" { + return nil, errors.New("ocfs: missing dir in metadata") + } + info.MetaData["dir"] = filepath.Clean(info.MetaData["dir"]) + + ip := fs.toInternalPath(ctx, filepath.Join(info.MetaData["dir"], info.MetaData["filename"])) + + // check permissions + var perm *provider.ResourcePermissions + var perr error + // if destination exists + if _, err := os.Stat(ip); err == nil { + // check permissions of file to be overwritten + perm, perr = fs.readPermissions(ctx, ip) + } else { + // check permissions of parent folder + perm, perr = fs.readPermissions(ctx, filepath.Dir(ip)) + } + if perr == nil { + if !perm.InitiateFileUpload { + return nil, errtypes.PermissionDenied("") + } + } else { + if 
os.IsNotExist(err) { + return nil, errtypes.NotFound(fs.toStoragePath(ctx, filepath.Dir(ip))) + } + return nil, errors.Wrap(err, "ocfs: error reading permissions") + } + + log.Debug().Interface("info", info).Msg("ocfs: resolved filename") + + info.ID = uuid.New().String() + + binPath, err := fs.getUploadPath(ctx, info.ID) + if err != nil { + return nil, errors.Wrap(err, "ocfs: error resolving upload path") + } + usr := user.ContextMustGetUser(ctx) + storageID, err := fs.getUserStorage(ctx) + if err != nil { + return nil, err + } + info.Storage = map[string]string{ + "Type": "OwnCloudStore", + "BinPath": binPath, + "InternalDestination": ip, + + "Idp": usr.Id.Idp, + "UserId": usr.Id.OpaqueId, + "UserName": usr.Username, + + "LogLevel": log.GetLevel().String(), + + "StorageId": strconv.Itoa(storageID), + } + // Create binary file in the upload folder with no content + log.Debug().Interface("info", info).Msg("ocfs: built storage info") + file, err := os.OpenFile(binPath, os.O_CREATE|os.O_WRONLY, defaultFilePerm) + if err != nil { + return nil, err + } + defer file.Close() + + u := &fileUpload{ + info: info, + binPath: binPath, + infoPath: filepath.Join(fs.c.UploadInfoDir, info.ID+".info"), + fs: fs, + ctx: ctx, + } + + if !info.SizeIsDeferred && info.Size == 0 { + log.Debug().Interface("info", info).Msg("ocfs: finishing upload for empty file") + // no need to create info file and finish directly + err := u.FinishUpload(ctx) + if err != nil { + return nil, err + } + return u, nil + } + + // writeInfo creates the file by itself if necessary + err = u.writeInfo() + if err != nil { + return nil, err + } + + return u, nil +} + +func (fs *ocfs) getUploadPath(ctx context.Context, uploadID string) (string, error) { + u, ok := user.ContextGetUser(ctx) + if !ok { + err := errors.Wrap(errtypes.UserRequired("userrequired"), "error getting user from ctx") + return "", err + } + layout := templates.WithUser(u, fs.c.UserLayout) + return filepath.Join(fs.c.DataDirectory, layout, 
"uploads", uploadID), nil +} + +// GetUpload returns the Upload for the given upload id +func (fs *ocfs) GetUpload(ctx context.Context, id string) (tusd.Upload, error) { + infoPath := filepath.Join(fs.c.UploadInfoDir, id+".info") + + info := tusd.FileInfo{} + data, err := ioutil.ReadFile(infoPath) + if err != nil { + return nil, err + } + if err := json.Unmarshal(data, &info); err != nil { + return nil, err + } + + stat, err := os.Stat(info.Storage["BinPath"]) + if err != nil { + return nil, err + } + + info.Offset = stat.Size() + + u := &userpb.User{ + Id: &userpb.UserId{ + Idp: info.Storage["Idp"], + OpaqueId: info.Storage["UserId"], + }, + Username: info.Storage["UserName"], + } + + ctx = user.ContextSetUser(ctx, u) + // TODO configure the logger the same way ... store and add traceid in file info + + var opts []logger.Option + opts = append(opts, logger.WithLevel(info.Storage["LogLevel"])) + opts = append(opts, logger.WithWriter(os.Stderr, logger.ConsoleMode)) + l := logger.New(opts...) + + sub := l.With().Int("pid", os.Getpid()).Logger() + + ctx = appctx.WithLogger(ctx, &sub) + + return &fileUpload{ + info: info, + binPath: info.Storage["BinPath"], + infoPath: infoPath, + fs: fs, + ctx: ctx, + }, nil +} + +type fileUpload struct { + // info stores the current information about the upload + info tusd.FileInfo + // infoPath is the path to the .info file + infoPath string + // binPath is the path to the binary file (which has no extension) + binPath string + // only fs knows how to handle metadata and versions + fs *ocfs + // a context with a user + // TODO add logger as well? 
+ ctx context.Context +} + +// GetInfo returns the FileInfo +func (upload *fileUpload) GetInfo(ctx context.Context) (tusd.FileInfo, error) { + return upload.info, nil +} + +// WriteChunk writes the stream from the reader to the given offset of the upload +func (upload *fileUpload) WriteChunk(ctx context.Context, offset int64, src io.Reader) (int64, error) { + file, err := os.OpenFile(upload.binPath, os.O_WRONLY|os.O_APPEND, defaultFilePerm) + if err != nil { + return 0, err + } + defer file.Close() + + n, err := io.Copy(file, src) + + // If the HTTP PATCH request gets interrupted in the middle (e.g. because + // the user wants to pause the upload), Go's net/http returns an io.ErrUnexpectedEOF. + // However, for OwnCloudStore it's not important whether the stream has ended + // on purpose or accidentally. + if err != nil { + if err != io.ErrUnexpectedEOF { + return n, err + } + } + + upload.info.Offset += n + err = upload.writeInfo() // TODO info is written here ... we need to truncate in DiscardChunk + + return n, err +} + +// GetReader returns an io.Reader for the upload +func (upload *fileUpload) GetReader(ctx context.Context) (io.Reader, error) { + return os.Open(upload.binPath) +} + +// writeInfo updates the entire information. Everything will be overwritten. 
+func (upload *fileUpload) writeInfo() error {
+	log.Debug().Str("path", upload.infoPath).Msg("Writing info file")
+	data, err := json.Marshal(upload.info)
+	if err != nil {
+		return err
+	}
+	return ioutil.WriteFile(upload.infoPath, data, defaultFilePerm)
+}
+
+// FinishUpload finishes an upload and moves the file to the internal destination
+func (upload *fileUpload) FinishUpload(ctx context.Context) error {
+
+	/*
+		checksum := upload.info.MetaData["checksum"]
+		if checksum != "" {
+			// TODO check checksum
+			s := strings.SplitN(checksum, " ", 2)
+			if len(s) == 2 {
+				alg, hash := s[0], s[1]
+
+			}
+		}
+	*/
+
+	ip := upload.info.Storage["InternalDestination"]
+
+	// if destination exists
+	// TODO check etag with If-Match header
+	if _, err := os.Stat(ip); err == nil {
+		// copy attributes of existing file to tmp file
+		if err := upload.fs.copyMD(ip, upload.binPath); err != nil {
+			return errors.Wrap(err, "ocfs: error copying metadata from "+ip+" to "+upload.binPath)
+		}
+		// create revision
+		if err := upload.fs.archiveRevision(upload.ctx, upload.fs.getVersionsPath(upload.ctx, ip), ip); err != nil {
+			return err
+		}
+	}
+
+	sha1h, md5h, adler32h, err := upload.fs.HashFile(upload.binPath)
+	if err != nil {
+		// NOTE(review): on HashFile failure the zero-valued checksums are
+		// still written to the file cache below — confirm this best-effort
+		// behavior is intended
+		log.Err(err).Msg("owncloudsql: could not open file for checksumming")
+	}
+
+	err = os.Rename(upload.binPath, ip)
+	if err != nil {
+		log.Err(err).Interface("info", upload.info).
+			Str("binPath", upload.binPath).
+			Str("ipath", ip).
+			Msg("ocfs: could not rename")
+		return err
+	}
+
+	var fi os.FileInfo
+	fi, err = os.Stat(ip)
+	if err != nil {
+		return err
+	}
+
+	// NOTE(review): for deferred-size uploads info.Size is the declared
+	// length; presumably it equals fi.Size() after the rename — verify
+	data := map[string]interface{}{
+		"path":          upload.fs.toDatabasePath(upload.ctx, ip),
+		"checksum":      fmt.Sprintf("SHA1:%032x MD5:%032x ADLER32:%032x", sha1h, md5h, adler32h),
+		"etag":          calcEtag(upload.ctx, fi),
+		"size":          upload.info.Size,
+		"mimetype":      mime.Detect(false, ip),
+		"permissions":   27, // 1: READ, 2: UPDATE, 4: CREATE, 8: DELETE, 16: SHARE
+		"mtime":         upload.info.MetaData["mtime"],
+		"storage_mtime": upload.info.MetaData["mtime"],
+	}
+	_, err = upload.fs.filecache.InsertOrUpdate(upload.info.Storage["StorageId"], data)
+	if err != nil {
+		return err
+	}
+
+	// only delete the upload if it was successfully written to the storage
+	if err := os.Remove(upload.infoPath); err != nil {
+		if !os.IsNotExist(err) {
+			log.Err(err).Interface("info", upload.info).Msg("ocfs: could not delete upload info")
+			return err
+		}
+	}
+
+	return upload.fs.propagate(upload.ctx, ip)
+}
+
+// To implement the termination extension as specified in https://tus.io/protocols/resumable-upload.html#termination
+// - the storage needs to implement AsTerminatableUpload
+// - the upload needs to implement Terminate
+
+// AsTerminatableUpload returns a TerminatableUpload
+func (fs *ocfs) AsTerminatableUpload(upload tusd.Upload) tusd.TerminatableUpload {
+	return upload.(*fileUpload)
+}
+
+// Terminate terminates the upload
+func (upload *fileUpload) Terminate(ctx context.Context) error {
+	if err := os.Remove(upload.infoPath); err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+	}
+	if err := os.Remove(upload.binPath); err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+	}
+	return nil
+}
+
+// To implement the creation-defer-length extension as specified in https://tus.io/protocols/resumable-upload.html#creation
+// - the storage needs to implement AsLengthDeclarableUpload
+// - the upload needs to implement DeclareLength
+
+// 
AsLengthDeclarableUpload returns a LengthDeclarableUpload
+func (fs *ocfs) AsLengthDeclarableUpload(upload tusd.Upload) tusd.LengthDeclarableUpload {
+	return upload.(*fileUpload)
+}
+
+// DeclareLength updates the upload length information
+func (upload *fileUpload) DeclareLength(ctx context.Context, length int64) error {
+	upload.info.Size = length
+	upload.info.SizeIsDeferred = false
+	return upload.writeInfo()
+}
+
+// To implement the concatenation extension as specified in https://tus.io/protocols/resumable-upload.html#concatenation
+// - the storage needs to implement AsConcatableUpload
+// - the upload needs to implement ConcatUploads
+
+// AsConcatableUpload returns a ConcatableUpload
+func (fs *ocfs) AsConcatableUpload(upload tusd.Upload) tusd.ConcatableUpload {
+	return upload.(*fileUpload)
+}
+
+// ConcatUploads appends the binary files of all partial uploads to this
+// upload's binary file, in order.
+func (upload *fileUpload) ConcatUploads(ctx context.Context, uploads []tusd.Upload) (err error) {
+	file, err := os.OpenFile(upload.binPath, os.O_WRONLY|os.O_APPEND, defaultFilePerm)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	for _, partialUpload := range uploads {
+		fileUpload := partialUpload.(*fileUpload)
+
+		src, err := os.Open(fileUpload.binPath)
+		if err != nil {
+			return err
+		}
+		// close the source explicitly inside the loop; the previous code never
+		// closed it, leaking one file descriptor per partial upload, and a
+		// defer here would keep every source open until ConcatUploads returns
+		_, err = io.Copy(file, src)
+		src.Close()
+		if err != nil {
+			return err
+		}
+	}
+
+	return
+}