This repository has been archived by the owner on Jun 27, 2023. It is now read-only.

Support unixfs 1.5 inline metadata #117

Closed
wants to merge 2 commits
Changes from 1 commit
96 changes: 94 additions & 2 deletions importer/balanced/balanced_test.go
@@ -7,12 +7,15 @@ import (
"io"
"io/ioutil"
mrand "math/rand"
"os"
"testing"
"time"

h "github.com/ipfs/go-unixfs/importer/helpers"
uio "github.com/ipfs/go-unixfs/io"

chunker "github.com/ipfs/go-ipfs-chunker"
files "github.com/ipfs/go-ipfs-files"
u "github.com/ipfs/go-ipfs-util"
ipld "github.com/ipfs/go-ipld-format"
dag "github.com/ipfs/go-merkledag"
@@ -27,6 +30,10 @@ func buildTestDag(ds ipld.DAGService, spl chunker.Splitter) (*dag.ProtoNode, err
Maxlinks: h.DefaultLinksPerBlock,
}

return buildTestDagWithParams(ds, spl, dbp)
}

func buildTestDagWithParams(ds ipld.DAGService, spl chunker.Splitter, dbp h.DagBuilderParams) (*dag.ProtoNode, error) {
db, err := dbp.New(spl)
if err != nil {
return nil, err
@@ -53,7 +60,7 @@ func getTestDag(t *testing.T, ds ipld.DAGService, size int64, blksize int64) (*d
return nd, data
}

//Test where calls to read are smaller than the chunk size
// Test where calls to read are smaller than the chunk size
func TestSizeBasedSplit(t *testing.T) {
if testing.Short() {
t.SkipNow()
@@ -299,7 +306,6 @@ func TestSeekingStress(t *testing.T) {
t.Fatal(err)
}
}

}

func TestSeekingConsistency(t *testing.T) {
@@ -337,3 +343,89 @@ func TestSeekingConsistency(t *testing.T) {
t.Fatal(err)
}
}

func TestMetadata(t *testing.T) {
nbytes := 3 * chunker.DefaultBlockSize
buf := new(bytes.Buffer)
io.CopyN(buf, u.NewTimeSeededRand(), int64(nbytes))

dagserv := mdtest.Mock()
dbp := h.DagBuilderParams{
Dagserv: dagserv,
Maxlinks: h.DefaultLinksPerBlock,
FileMode: 0o522,
ModTime: time.Unix(1638111600, 76552),
}

nd, err := buildTestDagWithParams(dagserv, chunker.DefaultSplitter(buf), dbp)
if err != nil {
t.Fatal(err)
}

dr, err := uio.NewDagReader(context.Background(), nd, dagserv)
if err != nil {
t.Fatal(err)
}

if !dr.ModTime().Equal(dbp.ModTime) {
t.Errorf("got modtime %v, wanted %v", dr.ModTime(), dbp.ModTime)
}

if dr.FileMode() != dbp.FileMode {
t.Errorf("got filemode %o, wanted %o", dr.FileMode(), dbp.FileMode)
}
}

type fileinfo struct {
name string
size int64
mode os.FileMode
mtime time.Time
}

func (fi *fileinfo) Name() string { return fi.name }
func (fi *fileinfo) Size() int64 { return fi.size }
func (fi *fileinfo) Mode() os.FileMode { return fi.mode }
func (fi *fileinfo) ModTime() time.Time { return fi.mtime }
func (fi *fileinfo) IsDir() bool { return false }
func (fi *fileinfo) Sys() interface{} { return nil }

func TestMetadataFromFilestore(t *testing.T) {
nbytes := 3 * chunker.DefaultBlockSize
buf := new(bytes.Buffer)
io.CopyN(buf, u.NewTimeSeededRand(), int64(nbytes))

fi := &fileinfo{
mode: 0o522,
mtime: time.Unix(1638111600, 76552),
}

rpf, err := files.NewReaderPathFile("/path", ioutil.NopCloser(buf), fi)
if err != nil {
t.Fatalf("new reader path file: %v", err)
}

dagserv := mdtest.Mock()
dbp := h.DagBuilderParams{
Dagserv: dagserv,
Maxlinks: h.DefaultLinksPerBlock,
NoCopy: true,
}
nd, err := buildTestDagWithParams(dagserv, chunker.DefaultSplitter(rpf), dbp)
if err != nil {
t.Fatal(err)
}

dr, err := uio.NewDagReader(context.Background(), nd, dagserv)
if err != nil {
t.Fatal(err)
}

if !dr.ModTime().Equal(fi.mtime) {
t.Errorf("got modtime %v, wanted %v", dr.ModTime(), fi.mtime)
}

if dr.FileMode() != fi.mode {
t.Errorf("got filemode %o, wanted %o", dr.FileMode(), fi.mode)
}
}
32 changes: 22 additions & 10 deletions importer/balanced/builder.go
@@ -163,10 +163,22 @@ func Layout(db *h.DagBuilderHelper) (ipld.Node, error) {
// Fill the `newRoot` (that has the old `root` already as child)
// and make it the current `root` for the next iteration (when
// it will become "old").
root, fileSize, err = fillNodeRec(db, newRoot, depth)
var potentialRoot *h.FSNodeOverDag
potentialRoot, fileSize, err = fillNodeRec(db, newRoot, depth)
if err != nil {
return nil, err
}

// Only add file metadata to the top level root
if db.Done() {
db.FillMetadata(potentialRoot)
}

root, err = potentialRoot.Commit()
if err != nil {
return nil, err
}

}

return root, db.Add(root)
@@ -212,7 +224,7 @@ func Layout(db *h.DagBuilderHelper) (ipld.Node, error) {
// seeking through the DAG when reading data later).
//
// warning: **children** pinned indirectly, but input node IS NOT pinned.
func fillNodeRec(db *h.DagBuilderHelper, node *h.FSNodeOverDag, depth int) (filledNode ipld.Node, nodeFileSize uint64, err error) {
func fillNodeRec(db *h.DagBuilderHelper, node *h.FSNodeOverDag, depth int) (filledNode *h.FSNodeOverDag, nodeFileSize uint64, err error) {
if depth < 1 {
return nil, 0, errors.New("attempt to fillNode at depth < 1")
}
@@ -240,10 +252,16 @@ func fillNodeRec(db *h.DagBuilderHelper, node *h.FSNodeOverDag, depth int) (fill
} else {
// Recursion case: create an internal node to in turn keep
// descending in the DAG and adding child nodes to it.
childNode, childFileSize, err = fillNodeRec(db, nil, depth-1)
var internalNode *h.FSNodeOverDag
internalNode, childFileSize, err = fillNodeRec(db, nil, depth-1)
if err != nil {
return nil, 0, err
}
childNode, err = internalNode.Commit()
if err != nil {
return nil, 0, err
}

}

err = node.AddChild(childNode, childFileSize, db)
@@ -254,11 +272,5 @@ func fillNodeRec(db *h.DagBuilderHelper, node *h.FSNodeOverDag, depth int) (fill

nodeFileSize = node.FileSize()

// Get the final `dag.ProtoNode` with the `FSNode` data encoded inside.
filledNode, err = node.Commit()
if err != nil {
return nil, 0, err
}

return filledNode, nodeFileSize, nil
return node, nodeFileSize, nil
}
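
In short: fillNodeRec no longer commits the node it fills. It hands back the uncommitted *h.FSNodeOverDag, and its callers own the Commit(), which is what gives Layout a window to attach metadata to the root before it is encoded. A condensed view of the caller-side contract (a restatement of the diff above, not new behavior):

	// fillNodeRec returns the node still in FSNodeOverDag form...
	internal, childFileSize, err := fillNodeRec(db, nil, depth-1)
	if err != nil {
		return nil, 0, err
	}
	// ...and the caller decides when to encode it into a dag.ProtoNode.
	childNode, err := internal.Commit()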
56 changes: 54 additions & 2 deletions importer/helpers/dagbuilder.go
@@ -5,6 +5,7 @@ import (
"errors"
"io"
"os"
"time"

dag "github.com/ipfs/go-merkledag"

@@ -30,6 +31,8 @@ type DagBuilderHelper struct {
nextData []byte // the next item to return.
maxlinks int
cidBuilder cid.Builder
modTime time.Time
fileMode os.FileMode

// Filestore support variables.
// ----------------------------
@@ -65,6 +68,14 @@ type DagBuilderParams struct {
// NoCopy signals to the chunker that it should track fileinfo for
// filestore adds
NoCopy bool

// ModTime is the optional file modification time to be embedded in the final dag
// Note that this will be overwritten by the fileinfo of the underlying filestore file if NoCopy is true
ModTime time.Time

// FileMode is the optional file mode metadata to be embedded in the final dag
// Note that this will be overwritten by the fileinfo of the underlying filestore file if NoCopy is true
FileMode os.FileMode
}

// New generates a new DagBuilderHelper from the given params and a given
@@ -76,10 +87,16 @@ func (dbp *DagBuilderParams) New(spl chunker.Splitter) (*DagBuilderHelper, error
rawLeaves: dbp.RawLeaves,
cidBuilder: dbp.CidBuilder,
maxlinks: dbp.Maxlinks,
modTime: dbp.ModTime,
fileMode: dbp.FileMode,
}
if fi, ok := spl.Reader().(files.FileInfo); dbp.NoCopy && ok {
db.fullPath = fi.AbsPath()
db.stat = fi.Stat()
if db.stat != nil {
db.modTime = db.stat.ModTime()
db.fileMode = db.stat.Mode()
}
}

if dbp.NoCopy && db.fullPath == "" { // Enforce NoCopy
@@ -177,7 +194,6 @@ func (db *DagBuilderHelper) NewLeafNode(data []byte, fsNodeType pb.Data_DataType
// NOTE: This function creates raw data nodes so it only works
// for the `trickle.Layout`.
func (db *DagBuilderHelper) FillNodeLayer(node *FSNodeOverDag) error {

// while we have room AND we're not done
for node.NumChildren() < db.maxlinks && !db.Done() {
child, childFileSize, err := db.NewLeafDataNode(ft.TRaw)
@@ -232,7 +248,7 @@ func (db *DagBuilderHelper) NewLeafDataNode(fsNodeType pb.Data_DataType) (node i
// offset is more related to this function).
func (db *DagBuilderHelper) ProcessFileStore(node ipld.Node, dataSize uint64) ipld.Node {
// Check if Filestore is being used.
if db.fullPath != "" {
if db.isUsingFilestore() {
// Check if the node is actually a raw node (needed for
// Filestore support).
if _, ok := node.(*dag.RawNode); ok {
Expand Down Expand Up @@ -267,6 +283,18 @@ func (db *DagBuilderHelper) Maxlinks() int {
return db.maxlinks
}

func (db *DagBuilderHelper) isUsingFilestore() bool {
return db.fullPath != ""
}

// FillMetadata sets metadata attributes on the supplied node.
func (db *DagBuilderHelper) FillMetadata(node *FSNodeOverDag) error {
node.SetFileMode(db.fileMode)
node.SetModTime(db.modTime)

return nil
}

// FSNodeOverDag encapsulates an `unixfs.FSNode` that will be stored in a
// `dag.ProtoNode`. Instead of just having a single `ipld.Node` that
// would need to be constantly (un)packed to access and modify its
@@ -398,3 +426,27 @@ func (n *FSNodeOverDag) GetChild(ctx context.Context, i int, ds ipld.DAGService)

return NewFSNFromDag(pbn)
}

// FileMode returns the file mode bits from the underlying
// representation of the `ft.FSNode`.
func (n *FSNodeOverDag) FileMode() os.FileMode {
return n.file.FileMode()
}

// SetFileMode sets the file mode bits in the underlying
// representation of the `ft.FSNode`.
func (n *FSNodeOverDag) SetFileMode(m os.FileMode) {
n.file.SetFileMode(m)
}

// ModTime returns the modification time of the file from the underlying
// representation of the `ft.FSNode`.
func (n *FSNodeOverDag) ModTime() time.Time {
return n.file.ModTime()
}

// SetModTime sets the modification time of the file in the underlying
// representation of the `ft.FSNode`.
func (n *FSNodeOverDag) SetModTime(t time.Time) {
n.file.SetModTime(t)
}
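
For reference, a minimal end-to-end sketch of how the pieces in this PR compose. This is hypothetical driver code, not part of the diff: the balanced and mdtest import paths, the 0o644 mode, and the mtime value are assumptions, while DagBuilderParams, Layout, NewDagReader, FileMode, and ModTime are taken from the changes above.

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"time"

	"github.com/ipfs/go-unixfs/importer/balanced"
	h "github.com/ipfs/go-unixfs/importer/helpers"
	uio "github.com/ipfs/go-unixfs/io"

	chunker "github.com/ipfs/go-ipfs-chunker"
	u "github.com/ipfs/go-ipfs-util"
	mdtest "github.com/ipfs/go-merkledag/test"
)

func main() {
	// Random file content, as in the tests above.
	buf := new(bytes.Buffer)
	io.CopyN(buf, u.NewTimeSeededRand(), 3*chunker.DefaultBlockSize)

	dagserv := mdtest.Mock()
	dbp := h.DagBuilderParams{
		Dagserv:  dagserv,
		Maxlinks: h.DefaultLinksPerBlock,
		FileMode: 0o644,                    // illustrative values only
		ModTime:  time.Unix(1638111600, 0), // stat() wins instead when NoCopy is set
	}

	db, err := dbp.New(chunker.DefaultSplitter(buf))
	if err != nil {
		panic(err)
	}

	// Layout builds the tree and, per this PR, stamps the metadata on the
	// top-level root via FillMetadata before the final Commit.
	nd, err := balanced.Layout(db)
	if err != nil {
		panic(err)
	}

	// On read-back, the DagReader exposes the embedded metadata.
	dr, err := uio.NewDagReader(context.Background(), nd, dagserv)
	if err != nil {
		panic(err)
	}
	fmt.Printf("mode=%o mtime=%v\n", dr.FileMode(), dr.ModTime())
}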