Skip to content

Commit

Permalink
fix t0250 sharding and update unixfs with new-dir fix
Browse files — browse the repository at this point in the history
  • Loading branch information
schomatis committed May 19, 2021
1 parent 6bd9be9 commit 811ea25
Show file tree
Hide file tree
Showing 3 changed files with 27 additions and 9 deletions.
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ require (
github.com/ipfs/go-namesys v0.1.1
github.com/ipfs/go-path v0.0.9
github.com/ipfs/go-pinning-service-http-client v0.1.0
github.com/ipfs/go-unixfs v0.2.6
github.com/ipfs/go-unixfs v0.2.7-0.20210519135750-3befc7e347fa
github.com/ipfs/go-verifcid v0.0.1
github.com/ipfs/interface-go-ipfs-core v0.4.0
github.com/ipld/go-car v0.2.2
Expand Down
2 changes: 2 additions & 0 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -494,6 +494,8 @@ github.com/ipfs/go-unixfs v0.2.5 h1:irj/WzIcgTBay48mSMUYDbKlIzIocXWcuUUsi5qOMOE=
github.com/ipfs/go-unixfs v0.2.5/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw=
github.com/ipfs/go-unixfs v0.2.6 h1:gq3U3T2vh8x6tXhfo3uSO3n+2z4yW0tYtNgVP/3sIyA=
github.com/ipfs/go-unixfs v0.2.6/go.mod h1:GTTzQvaZsTZARdNkkdjDKFFnBhmO3e5mIM1PkH/x4p0=
github.com/ipfs/go-unixfs v0.2.7-0.20210519135750-3befc7e347fa h1:b2JM6ADtCk9+DgUmhwR15oxQSnyEucqNssy6GRfm+J4=
github.com/ipfs/go-unixfs v0.2.7-0.20210519135750-3befc7e347fa/go.mod h1:GTTzQvaZsTZARdNkkdjDKFFnBhmO3e5mIM1PkH/x4p0=
github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E=
github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0=
github.com/ipfs/interface-go-ipfs-core v0.4.0 h1:+mUiamyHIwedqP8ZgbCIwpy40oX7QcXUbo4CZOeJVJg=
Expand Down
32 changes: 24 additions & 8 deletions test/sharness/t0250-files-api.sh
Original file line number Diff line number Diff line change
Expand Up @@ -45,21 +45,37 @@ verify_dir_contents() {
'
}

# Same as `t0260-sharding.sh`: sharding is now only triggered for large
# directories, so pre-build one with enough entries to cross the threshold.
test_expect_success "set up test data" '
	mkdir big_dir &&
	for i in $(seq 6500) # just to be sure
	do
		# Zero-pad to "file%06d" so every name has a fixed length of 10 chars
		# (keeps the shard layout deterministic regardless of the index).
		echo "$i" > big_dir/$(printf "file%06d" "$i") || return 1
	done
'

test_sharding() {
local EXTRA ARGS
EXTRA=$1
ARGS=$2 # only applied to the initial directory

test_expect_success "make a directory $EXTRA" '
ipfs files mkdir $ARGS /foo
ipfs add $ARGS -r -q big_dir | tail -n1 > sharddir_cid &&
ipfs files cp /ipfs/`cat sharddir_cid` /foo
'

test_expect_success "can make 100 files in a directory $EXTRA" '
printf "" > list_exp_raw
for i in `seq 100 -1 1`
# Already present in the directory to trigger sharding
for i in `seq 6500` # just to be sure
do
echo `printf "file%06d" $i` >> list_exp_raw
done
for i in `seq 10100 -1 10000`
do
echo $i | ipfs files write --create /foo/file$i || return 1
echo file$i >> list_exp_raw
echo $i | ipfs files write --create /foo/`printf "file%06d" $i` || return 1
echo `printf "file%06d" $i` >> list_exp_raw
done
'
# Create the files in reverse (unsorted) order (`seq 100 -1 1`)
Expand All @@ -80,13 +96,13 @@ test_sharding() {
'

test_expect_success "can read a file from sharded directory $EXTRA" '
ipfs files read /foo/file65 > file_out &&
ipfs files read /foo/file000065 > file_out &&
echo "65" > file_exp &&
test_cmp file_out file_exp
'

test_expect_success "can pin a file from sharded directory $EXTRA" '
ipfs files stat --hash /foo/file42 > pin_file_hash &&
ipfs files stat --hash /foo/file000042 > pin_file_hash &&
ipfs pin add < pin_file_hash > pin_hash
'

Expand Down Expand Up @@ -780,10 +796,10 @@ test_expect_success "enable sharding in config" '

test_launch_ipfs_daemon --offline

SHARD_HASH=QmPkwLJTYZRGPJ8Lazr9qPdrLmswPtUjaDbEpmR9jEh1se
SHARD_HASH=QmWSqxvnnHh6NL9ctzpwMiWg7iyCKTfcvLoQbCqC7qPQHw
test_sharding "(cidv0)"

SHARD_HASH=bafybeib46tpawg2d2hhlmmn2jvgio33wqkhlehxrem7wbfvqqikure37rm
SHARD_HASH=bafybeid7vaa7ywcd3mgrkdohpyqm5wxtmkwovyvxftzio3wuie3vlx4tcq
test_sharding "(cidv1 root)" "--cid-version=1"

test_kill_ipfs_daemon
Expand Down

0 comments on commit 811ea25

Please sign in to comment.