Add func to get total tar size
License: MIT
Signed-off-by: rht <rhtbot@gmail.com>
rht committed Aug 11, 2015
1 parent db7e0dc commit 178b568
Showing 2 changed files with 49 additions and 23 deletions.
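
The new GetTarSize below leans on the tar on-disk format: each archive entry is a 512-byte header block followed by the entry's data rounded up to whole 512-byte blocks, and the archive ends with two zero blocks. A minimal, self-contained sketch of that arithmetic (illustration only, not part of the diff; tarEntrySize is a hypothetical helper mirroring the per-file formula this commit adds):

    package main

    import "fmt"

    const tarBlockSize = 512

    // tarEntrySize mirrors GetTarSize's per-file formula: one header
    // block, the file data, then padding up to the next block boundary.
    // As written it counts a full extra block when n is already a
    // multiple of 512, matching the commit's expression.
    func tarEntrySize(n uint64) uint64 {
    	return tarBlockSize + n + (tarBlockSize - n%tarBlockSize)
    }

    func main() {
    	fmt.Println(tarEntrySize(100)) // 512 + 100 + 412 = 1024
    	fmt.Println(2 * tarBlockSize)  // two trailing zero blocks = 1024
    }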
unixfs/io/dagreader.go (9 changes: 4 additions & 5 deletions)
@@ -58,8 +58,7 @@ type ReadSeekCloser interface {
 // node, using the passed in DAGService for data retrieval
 func NewDagReader(ctx context.Context, n *mdag.Node, serv mdag.DAGService) (*DagReader, error) {
 	pb := new(ftpb.Data)
-	err := proto.Unmarshal(n.Data, pb)
-	if err != nil {
+	if err := proto.Unmarshal(n.Data, pb); err != nil {
 		return nil, err
 	}
 
@@ -70,7 +69,7 @@ func NewDagReader(ctx context.Context, n *mdag.Node, serv mdag.DAGService) (*DagReader, error) {
 	case ftpb.Data_Raw:
 		fallthrough
 	case ftpb.Data_File:
-		return newDataFileReader(ctx, n, pb, serv), nil
+		return NewDataFileReader(ctx, n, pb, serv), nil
 	case ftpb.Data_Metadata:
 		if len(n.Links) == 0 {
 			return nil, errors.New("incorrectly formatted metadata object")
@@ -85,7 +84,7 @@ func NewDagReader(ctx context.Context, n *mdag.Node, serv mdag.DAGService) (*DagReader, error) {
 	}
 }
 
-func newDataFileReader(ctx context.Context, n *mdag.Node, pb *ftpb.Data, serv mdag.DAGService) *DagReader {
+func NewDataFileReader(ctx context.Context, n *mdag.Node, pb *ftpb.Data, serv mdag.DAGService) *DagReader {
 	fctx, cancel := context.WithCancel(ctx)
 	promises := serv.GetDAG(fctx, n)
 	return &DagReader{
@@ -124,7 +123,7 @@ func (dr *DagReader) precalcNextBuf(ctx context.Context) error {
 		// A directory should not exist within a file
 		return ft.ErrInvalidDirLocation
 	case ftpb.Data_File:
-		dr.buf = newDataFileReader(dr.ctx, nxt, pb, dr.serv)
+		dr.buf = NewDataFileReader(dr.ctx, nxt, pb, dr.serv)
 		return nil
 	case ftpb.Data_Raw:
 		dr.buf = NewRSNCFromBytes(pb.GetData())
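
The rename from newDataFileReader to the exported NewDataFileReader lets callers outside unixfs/io (the tar writer below) build a file reader from a node whose unixfs metadata they have already unmarshalled, skipping NewDagReader's type switch. A hedged caller sketch (openFileNode is hypothetical; import paths are assumed to match this commit's tree):

    package example

    import (
    	proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/proto"
    	cxt "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"

    	mdag "github.com/ipfs/go-ipfs/merkledag"
    	uio "github.com/ipfs/go-ipfs/unixfs/io"
    	ftpb "github.com/ipfs/go-ipfs/unixfs/pb"
    )

    // openFileNode unmarshals the node's unixfs metadata once, then hands
    // it straight to the exported constructor.
    func openFileNode(ctx cxt.Context, nd *mdag.Node, serv mdag.DAGService) (*uio.DagReader, error) {
    	pb := new(ftpb.Data)
    	if err := proto.Unmarshal(nd.Data, pb); err != nil {
    		return nil, err
    	}
    	return uio.NewDataFileReader(ctx, nd, pb, serv), nil
    }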
unixfs/tar/writer.go (63 changes: 45 additions & 18 deletions)
@@ -4,7 +4,6 @@ import (
 	"archive/tar"
 	"bufio"
 	"compress/gzip"
-	"fmt"
 	"io"
 	"path"
 	"time"
@@ -13,10 +12,13 @@ import (
 	cxt "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
 
 	mdag "github.com/ipfs/go-ipfs/merkledag"
+	ft "github.com/ipfs/go-ipfs/unixfs"
 	uio "github.com/ipfs/go-ipfs/unixfs/io"
 	upb "github.com/ipfs/go-ipfs/unixfs/pb"
 )
 
+const tarBlockSize = 512
+
 // DefaultBufSize is the buffer size for gets. for now, 1MB, which is ~4 blocks.
 // TODO: does this need to be configurable?
 var DefaultBufSize = 1048576
@@ -39,7 +41,7 @@ func DagArchive(ctx cxt.Context, nd *mdag.Node, name string, dag mdag.DAGService, archive bool, compression int) (io.Reader, error) {
 
 	// write all the nodes recursively
 	go func() {
-		if err := w.WriteNode(nd, filename); err != nil {
+		if err := w.NodeWalk(nd, filename); err != nil {
 			pipew.CloseWithError(err)
 			return
 		}
@@ -66,9 +68,9 @@ type Writer struct {
 	w   io.Writer
 	ctx cxt.Context
 
-	Close     func() error
-	WriteFile func(*uio.DagReader, string) error
-	HandleDir func(string) error
+	Close      func() error
+	HandleFile func(*mdag.Node, *upb.Data, string) error
+	HandleDir  func(string) error
 }
 
 // NewWriter wraps given io.Writer.
@@ -89,7 +91,8 @@ func NewWriter(ctx cxt.Context, dag mdag.DAGService, archive bool, compression int) (*Writer, error) {
 
 	if !archive && compression != gzip.NoCompression {
 		// gz only case
-		w.WriteFile = func(dagr *uio.DagReader, fpath string) error {
+		w.HandleFile = func(nd *mdag.Node, pb *upb.Data, fpath string) error {
+			dagr := uio.NewDataFileReader(w.ctx, nd, pb, w.Dag)
 			_, err := dagr.WriteTo(w.w)
 			return err
 		}
@@ -102,10 +105,12 @@ func NewWriter(ctx cxt.Context, dag mdag.DAGService, archive bool, compression int) (*Writer, error) {
 		// construct the tar writer
 		w.TarW = tar.NewWriter(w.w)
 
-		w.WriteFile = func(dagr *uio.DagReader, fpath string) error {
-			if err := writeFileHeader(w.TarW, fpath, dagr.Size()); err != nil {
+		w.HandleFile = func(nd *mdag.Node, pb *upb.Data, fpath string) error {
+			if err := writeFileHeader(w.TarW, fpath, pb.GetFilesize()); err != nil {
 				return err
 			}
+
+			dagr := uio.NewDataFileReader(w.ctx, nd, pb, w.Dag)
 			_, err := dagr.WriteTo(w.TarW)
 			return err
 		}
@@ -121,8 +126,10 @@ func NewWriter(ctx cxt.Context, dag mdag.DAGService, archive bool, compression int) (*Writer, error) {
 }
 
 func (w *Writer) writeDir(nd *mdag.Node, fpath string) error {
-	if err := w.HandleDir(fpath); err != nil {
-		return err
+	if w.HandleDir != nil {
+		if err := w.HandleDir(fpath); err != nil {
+			return err
+		}
 	}
 
 	for i, ng := range w.Dag.GetDAG(w.ctx, nd) {
@@ -132,32 +139,52 @@ func (w *Writer) writeDir(nd *mdag.Node, fpath string) error {
 		}
 
 		npath := path.Join(fpath, nd.Links[i].Name)
-		if err := w.WriteNode(child, npath); err != nil {
+		if err := w.NodeWalk(child, npath); err != nil {
 			return err
 		}
 	}
 
 	return nil
 }
 
-func (w *Writer) WriteNode(nd *mdag.Node, fpath string) error {
+func (w *Writer) NodeWalk(nd *mdag.Node, fpath string) error {
 	pb := new(upb.Data)
 	if err := proto.Unmarshal(nd.Data, pb); err != nil {
 		return err
 	}
 
 	switch pb.GetType() {
 	case upb.Data_Metadata:
 		fallthrough
 	case upb.Data_Directory:
 		return w.writeDir(nd, fpath)
 	case upb.Data_Raw:
 		fallthrough
 	case upb.Data_File:
-		dagr, err := uio.NewDagReader(w.ctx, nd, w.Dag)
-		if err != nil {
-			return err
-		}
-		return w.WriteFile(dagr, fpath)
+		return w.HandleFile(nd, pb, fpath)
 	default:
-		return fmt.Errorf("unixfs type not supported: %s", pb.GetType())
+		return ft.ErrUnrecognizedType
 	}
 }
 
+func GetTarSize(ctx cxt.Context, nd *mdag.Node, dag mdag.DAGService) (uint64, error) {
+	size := 2 * uint64(tarBlockSize) // tar root padding
+
+	w := &Writer{
+		Dag: dag,
+		ctx: ctx,
+	}
+
+	w.HandleFile = func(nd *mdag.Node, pb *upb.Data, fpath string) error {
+		unixSize := pb.GetFilesize()
+		size += tarBlockSize + unixSize + (tarBlockSize - unixSize%tarBlockSize)
+		return nil
+	}
+
+	if err := w.NodeWalk(nd, ""); err != nil {
+		return 0, err
+	}
+	return size, nil
+}
+
 func writeDirHeader(w *tar.Writer, fpath string) error {
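
GetTarSize thus reuses NodeWalk with a HandleFile callback that does pure arithmetic and never reads file data; HandleDir stays nil, which is what the new nil check in writeDir accommodates, so directory entries add nothing to the estimate. A hedged usage sketch (printTarSize and the tarfmt alias are hypothetical):

    package example

    import (
    	"fmt"

    	cxt "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"

    	mdag "github.com/ipfs/go-ipfs/merkledag"
    	tarfmt "github.com/ipfs/go-ipfs/unixfs/tar"
    )

    // printTarSize reports how many bytes an uncompressed tar of nd would
    // occupy, computed from unixfs metadata alone.
    func printTarSize(ctx cxt.Context, nd *mdag.Node, dserv mdag.DAGService) error {
    	size, err := tarfmt.GetTarSize(ctx, nd, dserv)
    	if err != nil {
    		return err
    	}
    	fmt.Printf("estimated tar size: %d bytes\n", size)
    	return nil
    }

The estimate covers file header and data blocks plus the two-block trailer; it is the size of the uncompressed tar stream, independent of any gzip layer applied on top.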
