Merge pull request #3996 from ipfs/kevina/raw-nodes-fixes
Add better support for Raw Nodes in MFS and elsewhere
whyrusleeping authored Jun 28, 2017
2 parents 628245f + b3828f4 commit 07162dd
Showing 3 changed files with 102 additions and 42 deletions.
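
In short, this commit threads the generic node.Node interface through DagModifier so it can operate on both protobuf unixfs nodes and raw leaf blocks, and parameterizes the MFS sharness tests so the same suite runs with and without --raw-leaves. The recurring pattern in the Go changes below is a type switch on the concrete node type. A minimal, self-contained sketch of that pattern (the types here are hypothetical stand-ins, not the go-ipfs API):

```go
package main

import "fmt"

// Hypothetical stand-in types, not the go-ipfs API; they only
// illustrate the dispatch pattern this commit threads through DagModifier.
type Node interface {
	Data() []byte
}

type ProtoNode struct{ data []byte } // unixfs data wrapped in a protobuf envelope
type RawNode struct{ data []byte }   // bare file bytes, no envelope

func (n *ProtoNode) Data() []byte { return n.data }
func (n *RawNode) Data() []byte   { return n.data }

// fileSize dispatches on the concrete type, as the new DagModifier.Size
// does, and rejects node types it does not understand.
func fileSize(n Node) (int64, error) {
	switch n := n.(type) {
	case *ProtoNode:
		// the real code unmarshals the protobuf and returns its Filesize field
		return int64(len(n.Data())), nil
	case *RawNode:
		// a raw leaf is its own payload: file size == block size
		return int64(len(n.data)), nil
	default:
		return 0, fmt.Errorf("only unixfs nodes (proto or raw) are supported")
	}
}

func main() {
	sz, _ := fileSize(&RawNode{data: []byte("foo\n")})
	fmt.Println(sz) // prints 4
}
```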
62 changes: 45 additions & 17 deletions test/sharness/t0250-files-api.sh
@@ -10,17 +10,16 @@ test_description="test the unix files api"

test_init_ipfs

# setup files for testing
test_expect_success "can create some files for testing" '
FILE1=$(echo foo | ipfs add -q) &&
FILE2=$(echo bar | ipfs add -q) &&
FILE3=$(echo baz | ipfs add -q) &&
mkdir stuff_test &&
create_files() {
FILE1=$(echo foo | ipfs add "$@" -q) &&
FILE2=$(echo bar | ipfs add "$@" -q) &&
FILE3=$(echo baz | ipfs add "$@" -q) &&
mkdir -p stuff_test &&
echo cats > stuff_test/a &&
echo dogs > stuff_test/b &&
echo giraffes > stuff_test/c &&
DIR1=$(ipfs add -q stuff_test | tail -n1)
'
DIR1=$(ipfs add -r "$@" -q stuff_test | tail -n1)
}

verify_path_exists() {
# simply running ls on a file should be a good 'check'
@@ -90,6 +89,8 @@ test_sharding() {
}

test_files_api() {
ROOT_HASH=$1

test_expect_success "can mkdir in root" '
ipfs files mkdir /cats
'
@@ -159,6 +160,12 @@ test_files_api() {
verify_dir_contents /cats file1
'

test_expect_success "file has correct hash and size in directory" '
echo "file1 $FILE1 4" > ls_l_expected &&
ipfs files ls -l /cats > ls_l_actual &&
test_cmp ls_l_expected ls_l_actual
'

test_expect_success "can read file" '
ipfs files read /cats/file1 > file1out
'
@@ -402,15 +409,15 @@ test_files_api() {
test_expect_success "root hash not bubbled up yet" '
test -z "$ONLINE" ||
(ipfs refs local > refsout &&
test_expect_code 1 grep QmcwKfTMCT7AaeiD92hWjnZn9b6eh9NxnhfSzN5x2vnDpt refsout)
test_expect_code 1 grep $ROOT_HASH refsout)
'

test_expect_success "changes bubbled up to root on inspection" '
ipfs files stat --hash / > root_hash
'

test_expect_success "root hash looks good" '
export EXP_ROOT_HASH="QmcwKfTMCT7AaeiD92hWjnZn9b6eh9NxnhfSzN5x2vnDpt" &&
export EXP_ROOT_HASH="$ROOT_HASH" &&
echo $EXP_ROOT_HASH > root_hash_exp &&
test_cmp root_hash_exp root_hash
'
@@ -521,26 +528,47 @@ test_files_api() {
ipfs files rm -r /foobar &&
ipfs files rm -r /adir
'

test_expect_success "root mfs entry is empty" '
verify_dir_contents /
'

test_expect_success "repo gc" '
ipfs repo gc
'
}

# test offline and online
test_files_api
test_expect_success "can create some files for testing" '
create_files
'
test_files_api QmcwKfTMCT7AaeiD92hWjnZn9b6eh9NxnhfSzN5x2vnDpt

test_expect_success "clean up objects from previous test run" '
ipfs repo gc
test_expect_success "can create some files for testing with raw-leaves" '
create_files --raw-leaves
'
test_files_api QmTpKiKcAj4sbeesN6vrs5w3QeVmd4QmGpxRL81hHut4dZ

test_launch_ipfs_daemon
test_launch_ipfs_daemon --offline

ONLINE=1 # set online flag so tests can easily tell
test_files_api
test_kill_ipfs_daemon
test_expect_success "can create some files for testing" '
create_files
'
test_files_api QmcwKfTMCT7AaeiD92hWjnZn9b6eh9NxnhfSzN5x2vnDpt

test_expect_success "can create some files for testing with raw-leaves" '
create_files --raw-leaves
'
test_files_api QmTpKiKcAj4sbeesN6vrs5w3QeVmd4QmGpxRL81hHut4dZ

test_kill_ipfs_daemon --offline

test_expect_success "enable sharding in config" '
ipfs config --json Experimental.ShardingEnabled true
'

test_launch_ipfs_daemon
test_launch_ipfs_daemon --offline
test_sharding
test_kill_ipfs_daemon

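The two hard-coded root hashes above (QmcwKf… for default leaves, QmTpKi… for --raw-leaves) pin the expected MFS root after the same operation sequence under each leaf format. They differ because raw leaves store file bytes as bare blocks addressed with the raw codec rather than inside a dag-pb envelope, so every leaf CID, and with it every parent hash up to the root, changes. A hedged sketch with the go-cid and go-multihash libraries (illustrative only; in practice the digests differ too, since the dag-pb envelope changes the hashed bytes):

```go
package main

import (
	"fmt"

	cid "github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

func main() {
	data := []byte("foo\n")

	// One digest over the same bytes...
	digest, err := mh.Sum(data, mh.SHA2_256, -1)
	if err != nil {
		panic(err)
	}

	// ...yields two different CIDs depending on the codec, which is why
	// switching leaf formats changes every hash up to the MFS root.
	fmt.Println(cid.NewCidV1(cid.Raw, digest))         // raw leaf block
	fmt.Println(cid.NewCidV1(cid.DagProtobuf, digest)) // dag-pb-framed block
}
```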
79 changes: 55 additions & 24 deletions unixfs/mod/dagmodifier.go
@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"errors"
"fmt"
"io"

chunk "github.com/ipfs/go-ipfs/importer/chunk"
@@ -29,7 +30,7 @@ var writebufferSize = 1 << 21
// Dear god, please rename this to something more pleasant
type DagModifier struct {
dagserv mdag.DAGService
curNode *mdag.ProtoNode
curNode node.Node

splitter chunk.SplitterGen
ctx context.Context
@@ -42,14 +43,18 @@ type DagModifier struct {
read uio.DagReader
}

var ErrNotUnixfs = fmt.Errorf("dagmodifier only supports unixfs nodes (proto or raw)")

func NewDagModifier(ctx context.Context, from node.Node, serv mdag.DAGService, spl chunk.SplitterGen) (*DagModifier, error) {
pbn, ok := from.(*mdag.ProtoNode)
if !ok {
return nil, mdag.ErrNotProtobuf
switch from.(type) {
case *mdag.ProtoNode, *mdag.RawNode:
// ok
default:
return nil, ErrNotUnixfs
}

return &DagModifier{
curNode: pbn.Copy().(*mdag.ProtoNode),
curNode: from.Copy(),
dagserv: serv,
splitter: spl,
ctx: ctx,
@@ -144,19 +149,29 @@ func (dm *DagModifier) Write(b []byte) (int, error) {
return n, nil
}

func (dm *DagModifier) Size() (int64, error) {
pbn, err := ft.FromBytes(dm.curNode.Data())
if err != nil {
return 0, err
}
var ErrNoRawYet = fmt.Errorf("currently only fully support protonodes in the dagmodifier")

if dm.wrBuf != nil {
if uint64(dm.wrBuf.Len())+dm.writeStart > pbn.GetFilesize() {
// Size returns the Filesize of the node
func (dm *DagModifier) Size() (int64, error) {
switch nd := dm.curNode.(type) {
case *mdag.ProtoNode:
pbn, err := ft.FromBytes(nd.Data())
if err != nil {
return 0, err
}
if dm.wrBuf != nil && uint64(dm.wrBuf.Len())+dm.writeStart > pbn.GetFilesize() {
return int64(dm.wrBuf.Len()) + int64(dm.writeStart), nil
}
return int64(pbn.GetFilesize()), nil
case *mdag.RawNode:
if dm.wrBuf != nil {
return 0, ErrNoRawYet
}
sz, err := nd.Size()
return int64(sz), err
default:
return 0, ErrNotUnixfs
}

return int64(pbn.GetFilesize()), nil
}

// Sync writes changes to this dag to disk
@@ -222,7 +237,12 @@ func (dm *DagModifier) Sync() error {
// modifyDag writes the data in 'data' over the data in 'node' starting at 'offset'
// returns the new key of the passed in node and whether or not all the data in the reader
// has been consumed.
func (dm *DagModifier) modifyDag(node *mdag.ProtoNode, offset uint64, data io.Reader) (*cid.Cid, bool, error) {
func (dm *DagModifier) modifyDag(n node.Node, offset uint64, data io.Reader) (*cid.Cid, bool, error) {
node, ok := n.(*mdag.ProtoNode)
if !ok {
return nil, false, ErrNoRawYet
}

f, err := ft.FromBytes(node.Data())
if err != nil {
return nil, false, err
@@ -301,13 +321,19 @@ func (dm *DagModifier) modifyDag(node *mdag.ProtoNode, offset uint64, data io.Re
}

// appendData appends the blocks from the given chan to the end of this dag
func (dm *DagModifier) appendData(node *mdag.ProtoNode, spl chunk.Splitter) (node.Node, error) {
dbp := &help.DagBuilderParams{
Dagserv: dm.dagserv,
Maxlinks: help.DefaultLinksPerBlock,
func (dm *DagModifier) appendData(nd node.Node, spl chunk.Splitter) (node.Node, error) {
switch nd := nd.(type) {
case *mdag.ProtoNode:
dbp := &help.DagBuilderParams{
Dagserv: dm.dagserv,
Maxlinks: help.DefaultLinksPerBlock,
}
return trickle.TrickleAppend(dm.ctx, nd, dbp.New(spl))
case *mdag.RawNode:
return nil, fmt.Errorf("appending to raw node types not yet supported")
default:
return nil, ErrNotUnixfs
}

return trickle.TrickleAppend(dm.ctx, node, dbp.New(spl))
}

// Read data from this dag starting at the current offset
@@ -367,12 +393,12 @@ func (dm *DagModifier) CtxReadFull(ctx context.Context, b []byte) (int, error) {
}

// GetNode gets the modified DAG Node
func (dm *DagModifier) GetNode() (*mdag.ProtoNode, error) {
func (dm *DagModifier) GetNode() (node.Node, error) {
err := dm.Sync()
if err != nil {
return nil, err
}
return dm.curNode.Copy().(*mdag.ProtoNode), nil
return dm.curNode.Copy(), nil
}

// HasChanges returns whether or not there are unflushed changes to this dag
@@ -452,7 +478,12 @@ func (dm *DagModifier) Truncate(size int64) error {
}

// dagTruncate truncates the given node to 'size' and returns the modified Node
func dagTruncate(ctx context.Context, nd *mdag.ProtoNode, size uint64, ds mdag.DAGService) (*mdag.ProtoNode, error) {
func dagTruncate(ctx context.Context, n node.Node, size uint64, ds mdag.DAGService) (*mdag.ProtoNode, error) {
nd, ok := n.(*mdag.ProtoNode)
if !ok {
return nil, ErrNoRawYet
}

if len(nd.Links()) == 0 {
// TODO: this can likely be done without marshaling and remarshaling
pbn, err := ft.FromBytes(nd.Data())
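That closes out the dagmodifier.go changes. Taken together, the new entry points can be exercised roughly as below. This is a hedged sketch against the internal packages of this era, not a definitive usage: it assumes testu.GetDAGServ (the in-memory DAGService helper the test file below imports), chunk.SizeSplitterGen, and mdag.NewRawNode behave as their names suggest.

```go
package main

import (
	"context"
	"fmt"

	chunk "github.com/ipfs/go-ipfs/importer/chunk"
	mdag "github.com/ipfs/go-ipfs/merkledag"
	mod "github.com/ipfs/go-ipfs/unixfs/mod"
	testu "github.com/ipfs/go-ipfs/unixfs/test"
)

func main() {
	ds := testu.GetDAGServ() // assumed: in-memory DAGService test helper

	// A raw leaf: the file bytes as a bare block, no unixfs envelope.
	raw := mdag.NewRawNode([]byte("hello raw leaves"))

	// Before this commit, NewDagModifier returned ErrNotProtobuf here;
	// now raw nodes are accepted.
	dm, err := mod.NewDagModifier(context.Background(), raw, ds, chunk.SizeSplitterGen(512))
	if err != nil {
		panic(err)
	}

	// Read-side operations work on raw nodes...
	size, _ := dm.Size()
	fmt.Println(size) // 16

	// ...while the write paths (modifyDag, dagTruncate, appendData)
	// still bail out with ErrNoRawYet until raw support lands there too.
	nd, _ := dm.GetNode() // GetNode now returns the generic node.Node
	fmt.Println(nd.Cid())
}
```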
3 changes: 2 additions & 1 deletion unixfs/mod/dagmodifier_test.go
@@ -9,6 +9,7 @@ import (

h "github.com/ipfs/go-ipfs/importer/helpers"
trickle "github.com/ipfs/go-ipfs/importer/trickle"
mdag "github.com/ipfs/go-ipfs/merkledag"
ft "github.com/ipfs/go-ipfs/unixfs"
uio "github.com/ipfs/go-ipfs/unixfs/io"
testu "github.com/ipfs/go-ipfs/unixfs/test"
@@ -105,7 +106,7 @@ func TestDagModifierBasic(t *testing.T) {
t.Fatal(err)
}

size, err := ft.DataSize(node.Data())
size, err := ft.DataSize(node.(*mdag.ProtoNode).Data())
if err != nil {
t.Fatal(err)
}
