This repository has been archived by the owner on Jun 27, 2023. It is now read-only.

Support for storing file mode and modification times #85

Closed · wants to merge 19 commits

Commits
63c19f7
initial support for storing mode and mtime for files
kstuart Nov 5, 2020
23a21f8
rename function SetMtime to SetModTime
kstuart Nov 10, 2020
dbab990
rename FSNode.MTime to FSNode.ModTime
kstuart Aug 26, 2021
f5aff86
Merge branch 'master' of https://github.com/ipfs/go-unixfs into feat/…
kstuart Sep 2, 2021
9321db3
support updating the last modification time when file data changes
kstuart Sep 6, 2021
7985eee
cache file mode and last modification time in DagReader
kstuart Sep 6, 2021
b8c85d3
Merge remote-tracking branch 'upstream/master' into feat/unixfs/ufs15
kstuart Jan 1, 2022
0758fa7
implement unixfs 1.5 conformance
kstuart Jan 8, 2022
eeaa081
set mode and mtime during dag layout
kstuart Jun 26, 2022
46fe0fb
update fsn modification time if needed
kstuart Jun 26, 2022
199e170
support creating folder nodes with mode and mtime
kstuart Jun 26, 2022
6370d6e
refactor and harden FSN mode and mtime support
kstuart Jun 26, 2022
d496320
support getting stored mode and mtime from ufsDirectory
kstuart Jun 26, 2022
f73da15
support getting stored mtime for a Symlink
kstuart Jun 26, 2022
cff2736
only read mode and mtime for specific case
kstuart Jun 26, 2022
40ca5de
Merge branch 'master' of https://github.com/ipfs/go-unixfs into feat/…
kstuart Oct 25, 2022
ca7f8c8
add balanced layout metadata tests
kstuart Oct 31, 2022
363a597
add trickle tests with mode and modification time
kstuart Nov 2, 2022
dadb61f
always set protoNode mode/mtime for NewDagReader
kstuart Nov 2, 2022
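
Taken together, these commits add UnixFS 1.5 metadata (file mode and last-modification time) to the importer, the DAG reader, and the files abstraction. Below is a minimal end-to-end sketch adapted from the TestMetadata test added in this PR; the import paths follow the usual go-unixfs layout but are assumptions, and the concrete mode/time values are arbitrary.

package main

import (
	"bytes"
	"context"
	"fmt"
	"time"

	chunker "github.com/ipfs/go-ipfs-chunker"
	mdtest "github.com/ipfs/go-merkledag/test"
	"github.com/ipfs/go-unixfs/importer/balanced"
	h "github.com/ipfs/go-unixfs/importer/helpers"
	uio "github.com/ipfs/go-unixfs/io"
)

func main() {
	dagserv := mdtest.Mock()

	// FileMode and FileModTime are the new DagBuilderParams fields from this PR.
	dbp := h.DagBuilderParams{
		Dagserv:     dagserv,
		Maxlinks:    h.DefaultLinksPerBlock,
		FileMode:    0644,
		FileModTime: time.Unix(1638111600, 0),
	}

	db, err := dbp.New(chunker.DefaultSplitter(bytes.NewBufferString("hello unixfs 1.5")))
	if err != nil {
		panic(err)
	}

	// Layout now persists the mode/mtime on the root node via SetFileAttributes.
	root, err := balanced.Layout(db)
	if err != nil {
		panic(err)
	}

	// Read the stored attributes back through the DagReader.
	dr, err := uio.NewDagReader(context.Background(), root, dagserv)
	if err != nil {
		panic(err)
	}
	fmt.Println(dr.Mode(), dr.ModTime())
}
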
41 changes: 35 additions & 6 deletions file/unixfile.go
@@ -3,6 +3,8 @@ package unixfile
import (
"context"
"errors"
"os"
"time"

ft "github.com/ipfs/go-unixfs"
uio "github.com/ipfs/go-unixfs/io"
@@ -21,6 +23,8 @@ type ufsDirectory struct {
dserv ipld.DAGService
dir uio.Directory
size int64
mode os.FileMode
mtime time.Time
}

type ufsIterator struct {
@@ -122,6 +126,14 @@ func (d *ufsDirectory) Size() (int64, error) {
return d.size, nil
}

func (d *ufsDirectory) Mode() os.FileMode {
return d.mode
}

func (d *ufsDirectory) ModTime() time.Time {
return d.mtime
}

type ufsFile struct {
uio.DagReader
}
@@ -130,6 +142,14 @@ func (f *ufsFile) Size() (int64, error) {
return int64(f.DagReader.Size()), nil
}

func (f *ufsFile) Mode() os.FileMode {
return f.DagReader.Mode()
}

func (f *ufsFile) ModTime() time.Time {
return f.DagReader.ModTime()
}

func newUnixfsDir(ctx context.Context, dserv ipld.DAGService, nd *dag.ProtoNode) (files.Directory, error) {
dir, err := uio.NewDirectoryFromNode(dserv, nd)
if err != nil {
@@ -141,27 +161,37 @@ func newUnixfsDir(ctx context.Context, dserv ipld.DAGService, nd *dag.ProtoNode)
return nil, err
}

fsn, err := ft.FSNodeFromBytes(nd.Data())
if err != nil {
return nil, err
}

return &ufsDirectory{
ctx: ctx,
dserv: dserv,

dir: dir,
size: int64(size),
dir: dir,
size: int64(size),
mode: fsn.Mode(),
mtime: fsn.ModTime(),
}, nil
}

func NewUnixfsFile(ctx context.Context, dserv ipld.DAGService, nd ipld.Node) (files.Node, error) {
var ufs = ufsFile{}

switch dn := nd.(type) {
case *dag.ProtoNode:
fsn, err := ft.FSNodeFromBytes(dn.Data())
if err != nil {
return nil, err
}

if fsn.IsDir() {
return newUnixfsDir(ctx, dserv, dn)
}
if fsn.Type() == ft.TSymlink {
return files.NewLinkFile(string(fsn.Data()), nil), nil
return files.NewSymlinkFile(string(fsn.Data()), fsn.ModTime()), nil
}

case *dag.RawNode:
@@ -174,9 +204,8 @@ func NewUnixfsFile(ctx context.Context, dserv ipld.DAGService, nd ipld.Node) (fi
return nil, err
}

return &ufsFile{
DagReader: dr,
}, nil
ufs.DagReader = dr
return &ufs, nil
}

var _ files.Directory = &ufsDirectory{}
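
On the read side, the new ufsFile/ufsDirectory methods surface the stored attributes through the files abstraction. A sketch of how a caller might read them back; it assumes the companion go-ipfs-files change that adds Mode/ModTime to the files.Node interface, and the package/function names here are illustrative only.

package example

import (
	"context"
	"fmt"

	ipld "github.com/ipfs/go-ipld-format"
	unixfile "github.com/ipfs/go-unixfs/file"
)

// printAttributes resolves a UnixFS node into a files.Node and prints the
// mode and modification time stored in its metadata. Sketch only: it assumes
// files.Node exposes Mode()/ModTime(), matching the methods added above.
func printAttributes(ctx context.Context, dserv ipld.DAGService, nd ipld.Node) error {
	f, err := unixfile.NewUnixfsFile(ctx, dserv, nd)
	if err != nil {
		return err
	}
	fmt.Printf("mode=%v mtime=%v\n", f.Mode(), f.ModTime())
	return nil
}
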
48 changes: 48 additions & 0 deletions importer/balanced/balanced_test.go
@@ -7,6 +7,7 @@ import (
"io"
mrand "math/rand"
"testing"
"time"

h "github.com/ipfs/go-unixfs/importer/helpers"
uio "github.com/ipfs/go-unixfs/io"
@@ -26,6 +27,10 @@ func buildTestDag(ds ipld.DAGService, spl chunker.Splitter) (*dag.ProtoNode, err
Maxlinks: h.DefaultLinksPerBlock,
}

return buildTestDagWithParams(spl, dbp)
}

func buildTestDagWithParams(spl chunker.Splitter, dbp h.DagBuilderParams) (*dag.ProtoNode, error) {
db, err := dbp.New(spl)
if err != nil {
return nil, err
@@ -336,3 +341,46 @@ func TestSeekingConsistency(t *testing.T) {
t.Fatal(err)
}
}

func TestMetadataNoData(t *testing.T) {
testMetadata(t, new(bytes.Buffer))
}

func TestMetadata(t *testing.T) {
nbytes := 3 * chunker.DefaultBlockSize
buf := new(bytes.Buffer)
_, err := io.CopyN(buf, u.NewTimeSeededRand(), nbytes)
if err != nil {
t.Fatal(err)
}

testMetadata(t, buf)
}

func testMetadata(t *testing.T, buf *bytes.Buffer) {
dagserv := mdtest.Mock()
dbp := h.DagBuilderParams{
Dagserv: dagserv,
Maxlinks: h.DefaultLinksPerBlock,
FileMode: 0522,
FileModTime: time.Unix(1638111600, 76552),
}

nd, err := buildTestDagWithParams(chunker.DefaultSplitter(buf), dbp)
if err != nil {
t.Fatal(err)
}

dr, err := uio.NewDagReader(context.Background(), nd, dagserv)
if err != nil {
t.Fatal(err)
}

if !dr.ModTime().Equal(dbp.FileModTime) {
t.Errorf("got modtime %v, wanted %v", dr.ModTime(), dbp.FileModTime)
}

if dr.Mode() != dbp.FileMode {
t.Errorf("got filemode %o, wanted %o", dr.Mode(), dbp.FileMode)
}
}
30 changes: 22 additions & 8 deletions importer/balanced/builder.go
@@ -52,7 +52,6 @@ package balanced

import (
"errors"

ft "github.com/ipfs/go-unixfs"
h "github.com/ipfs/go-unixfs/importer/helpers"

@@ -130,18 +129,33 @@ import (
// | Chunk 1 | | Chunk 2 | | Chunk 3 |
// +=========+ +=========+ + - - - - +
func Layout(db *h.DagBuilderHelper) (ipld.Node, error) {
var root ipld.Node
var err error

if db.Done() {
// No data, return just an empty node.
root, err := db.NewLeafNode(nil, ft.TFile)
if err != nil {
return nil, err
}
// No data, just create an empty node.
root, err = db.NewLeafNode(nil, ft.TFile)
// This works without Filestore support (`ProcessFileStore`).
// TODO: Why? Is there a test case missing?
} else {
root, err = layoutData(db)
}

return root, db.Add(root)
if err != nil {
return nil, err
}

if db.HasFileAttributes() {
err = db.SetFileAttributes(root)
if err != nil {
return nil, err
}
}

return root, db.Add(root)
}

func layoutData(db *h.DagBuilderHelper) (ipld.Node, error) {
// The first `root` will be a single leaf node with data
// (corner case), after that subsequent `root` nodes will
// always be internal nodes (with a depth > 0) that can
@@ -169,7 +183,7 @@ func Layout(db *h.DagBuilderHelper) (ipld.Node, error) {
}
}

return root, db.Add(root)
return root, nil
}

// fillNodeRec will "fill" the given internal (non-leaf) `node` with data by
90 changes: 75 additions & 15 deletions importer/helpers/dagbuilder.go
@@ -5,6 +5,7 @@ import (
"errors"
"io"
"os"
"time"

dag "github.com/ipfs/go-merkledag"

@@ -23,13 +24,15 @@ var ErrMissingFsRef = errors.New("missing file path or URL, can't create filesto
// DagBuilderHelper wraps together a bunch of objects needed to
// efficiently create unixfs dag trees
type DagBuilderHelper struct {
dserv ipld.DAGService
spl chunker.Splitter
recvdErr error
rawLeaves bool
nextData []byte // the next item to return.
maxlinks int
cidBuilder cid.Builder
dserv ipld.DAGService
spl chunker.Splitter
recvdErr error
rawLeaves bool
nextData []byte // the next item to return.
maxlinks int
cidBuilder cid.Builder
fileMode os.FileMode
fileModTime time.Time

// Filestore support variables.
// ----------------------------
@@ -62,6 +65,12 @@ type DagBuilderParams struct {
// DAGService to write blocks to (required)
Dagserv ipld.DAGService

// The unixfs file mode
FileMode os.FileMode

// The unixfs last modified time
FileModTime time.Time

// NoCopy signals to the chunker that it should track fileinfo for
// filestore adds
NoCopy bool
@@ -71,11 +80,13 @@ type DagBuilderParams struct {
// chunker.Splitter as data source.
func (dbp *DagBuilderParams) New(spl chunker.Splitter) (*DagBuilderHelper, error) {
db := &DagBuilderHelper{
dserv: dbp.Dagserv,
spl: spl,
rawLeaves: dbp.RawLeaves,
cidBuilder: dbp.CidBuilder,
maxlinks: dbp.Maxlinks,
dserv: dbp.Dagserv,
spl: spl,
rawLeaves: dbp.RawLeaves,
cidBuilder: dbp.CidBuilder,
maxlinks: dbp.Maxlinks,
fileMode: dbp.FileMode,
fileModTime: dbp.FileModTime,
}
if fi, ok := spl.Reader().(files.FileInfo); dbp.NoCopy && ok {
db.fullPath = fi.AbsPath()
@@ -161,6 +172,7 @@ func (db *DagBuilderHelper) NewLeafNode(data []byte, fsNodeType pb.Data_DataType
// Encapsulate the data in UnixFS node (instead of a raw node).
fsNodeOverDag := db.NewFSNodeOverDag(fsNodeType)
fsNodeOverDag.SetFileData(data)

node, err := fsNodeOverDag.Commit()
if err != nil {
return nil, err
@@ -172,9 +184,9 @@ func (db *DagBuilderHelper) NewLeafNode(data []byte, fsNodeType pb.Data_DataType
return node, nil
}

// FillNodeLayer will add datanodes as children to the give node until
// FillNodeLayer will add data-nodes as children to the given node until
// it is full in this layer or no more data.
// NOTE: This function creates raw data nodes so it only works
// NOTE: This function creates raw data nodes, so it only works
// for the `trickle.Layout`.
func (db *DagBuilderHelper) FillNodeLayer(node *FSNodeOverDag) error {

@@ -267,6 +279,34 @@ func (db *DagBuilderHelper) Maxlinks() int {
return db.maxlinks
}

// HasFileAttributes will return false if Filestore is being used,
// otherwise returns true if a file mode or last modification time is set.
func (db *DagBuilderHelper) HasFileAttributes() bool {
return db.fullPath == "" && (db.fileMode != 0 || !db.fileModTime.IsZero())
}

// SetFileAttributes stores file attributes present in the `DagBuilderHelper`
// into the associated `ft.FSNode`.
func (db *DagBuilderHelper) SetFileAttributes(n ipld.Node) error {
if pn, ok := n.(*dag.ProtoNode); ok {
fsn, err := ft.FSNodeFromBytes(pn.Data())
if err != nil {
return err
}
fsn.SetModTime(db.fileModTime)
fsn.SetMode(db.fileMode)

d, err := fsn.GetBytes()
if err != nil {
return err
}

pn.SetData(d)
}

return nil
}

// FSNodeOverDag encapsulates an `unixfs.FSNode` that will be stored in a
// `dag.ProtoNode`. Instead of just having a single `ipld.Node` that
// would need to be constantly (un)packed to access and modify its
@@ -290,7 +330,7 @@ type FSNodeOverDag struct {
}

// NewFSNodeOverDag creates a new `dag.ProtoNode` and `ft.FSNode`
// decoupled from one onther (and will continue in that way until
// decoupled from one another (and will continue in that way until
// `Commit` is called), with `fsNodeType` specifying the type of
// the UnixFS layer node (either `File` or `Raw`).
func (db *DagBuilderHelper) NewFSNodeOverDag(fsNodeType pb.Data_DataType) *FSNodeOverDag {
@@ -376,6 +416,26 @@ func (n *FSNodeOverDag) SetFileData(fileData []byte) {
n.file.SetData(fileData)
}

// SetMode sets the file mode of the associated `ft.FSNode`.
func (n *FSNodeOverDag) SetMode(mode os.FileMode) {
n.file.SetMode(mode)
}

// SetModTime sets the file modification time of the associated `ft.FSNode`.
func (n *FSNodeOverDag) SetModTime(ts time.Time) {
n.file.SetModTime(ts)
}

// Mode returns the file mode of the associated `ft.FSNode`
func (n *FSNodeOverDag) Mode() os.FileMode {
return n.file.Mode()
}

// ModTime returns the last modification time of the associated `ft.FSNode`
func (n *FSNodeOverDag) ModTime() time.Time {
return n.file.ModTime()
}

// GetDagNode fills out the proper formatting for the FSNodeOverDag node
// inside of a DAG node and returns the dag node.
// TODO: Check if we have committed (passed the UnixFS information
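
The HasFileAttributes/SetFileAttributes pair is the hook a layout uses to persist the builder's attributes onto the root node before it is added to the DAG service. The sketch below mirrors the balanced.Layout change in this PR; the trickle layout touched by the other commits would presumably follow the same pattern, and the helper name here is hypothetical.

package example

import (
	ipld "github.com/ipfs/go-ipld-format"

	h "github.com/ipfs/go-unixfs/importer/helpers"
)

// finalizeRoot is a hypothetical helper showing the pattern balanced.Layout
// follows in this PR: write the stored mode/mtime into the root's FSNode
// (only when an attribute is set, and only outside the filestore path),
// then persist the root block.
func finalizeRoot(db *h.DagBuilderHelper, root ipld.Node) (ipld.Node, error) {
	if db.HasFileAttributes() {
		if err := db.SetFileAttributes(root); err != nil {
			return nil, err
		}
	}
	return root, db.Add(root)
}
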