From dd4fb36c3510e53d1ac7c27eed9c646d918da3c0 Mon Sep 17 00:00:00 2001 From: b5 Date: Thu, 3 Oct 2019 12:51:19 -0400 Subject: [PATCH] feat(logbook): add logbook inspection structs & methods working in flatbuffers brings the burden of needing diagnostic struct methods. Might as well lay the foundations for a logbook subcommand while we're at it also, fixes a number of bugs, adds a bunch of tests. --- actions/dataset_test.go | 2 +- logbook/log/log.go | 78 ++++---- logbook/log/log_test.go | 2 +- logbook/logbook.go | 265 ++++++++++++++++++++------ logbook/logbook_test.go | 402 ++++++++++++++++++++++++++++++++++++++-- 5 files changed, 634 insertions(+), 115 deletions(-) diff --git a/actions/dataset_test.go b/actions/dataset_test.go index c095048f8..cff472864 100644 --- a/actions/dataset_test.go +++ b/actions/dataset_test.go @@ -96,11 +96,11 @@ func TestAddDataset(t *testing.T) { func TestDataset(t *testing.T) { rmf := func(t *testing.T) repo.Repo { store := cafs.NewMapstore() + testPeerProfile.PrivKey = privKey mr, err := repo.NewMemRepo(testPeerProfile, store, qfs.NewMemFS(), profile.NewMemStore()) if err != nil { panic(err) } - // mr.SetPrivateKey(privKey) return mr } DatasetTests(t, rmf) diff --git a/logbook/log/log.go b/logbook/log/log.go index d22ebc0a7..07e5c92d2 100644 --- a/logbook/log/log.go +++ b/logbook/log/log.go @@ -33,9 +33,9 @@ import ( // authors, forming a conflict-free replicated data type (CRDT), and a basis // for collaboration through knowledge of each other's operations type Book struct { - authorname string - id string pk crypto.PrivKey + id string + authorname string logs map[uint32][]*Log } @@ -43,6 +43,7 @@ type Book struct { func NewBook(pk crypto.PrivKey, authorname, authorID string) (*Book, error) { return &Book{ pk: pk, + id: authorID, authorname: authorname, logs: map[uint32][]*Log{}, }, nil @@ -63,6 +64,11 @@ func (book *Book) AppendLog(l *Log) { book.logs[l.Model()] = append(book.logs[l.Model()], l) } +// Logs returns the full map of logs keyed by model type +func (book *Book) Logs() map[uint32][]*Log { + return book.logs +} + // ModelLogs gives all sets whoe model type matches model func (book *Book) ModelLogs(model uint32) []*Log { return book.logs[model] @@ -164,8 +170,8 @@ func (book Book) marshalFlatbuffer(builder *flatbuffers.Builder) flatbuffers.UOf func (book *Book) unmarshalFlatbuffer(b *logfb.Book) error { newBook := Book{ pk: book.pk, - authorname: string(b.Name()), id: string(b.Identifier()), + authorname: string(b.Name()), logs: map[uint32][]*Log{}, } @@ -195,15 +201,15 @@ func (book Book) logsSlice() (logs []*Log) { // Log is a causally-ordered set of operations performed by a single author. 
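// A Log may also hold child logs (see Child and AddChild below); the logbook
// package uses this to nest dataset logs underneath an author's namespace log.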
// log attribution is verified by an author's signature type Log struct { - signature []byte - ops []Op - logs []*Log + Signature []byte + Ops []Op + Logs []*Log } // InitLog creates a Log from an initialization operation func InitLog(initop Op) *Log { return &Log{ - ops: []Op{initop}, + Ops: []Op{initop}, } } @@ -216,12 +222,7 @@ func FromFlatbufferBytes(data []byte) (*Log, error) { // Append adds an operation to the log func (lg *Log) Append(op Op) { - lg.ops = append(lg.ops, op) -} - -// Len returns the number of of the latest entry in the log -func (lg Log) Len() int { - return len(lg.ops) + lg.Ops = append(lg.Ops, op) } // Model gives the operation type for a log, based on the first operation @@ -229,12 +230,12 @@ func (lg Log) Len() int { // first operation written to a log determines the kind of log for // catagorization purposes func (lg Log) Model() uint32 { - return lg.ops[0].Model + return lg.Ops[0].Model } // Author returns the name and identifier this log is attributed to func (lg Log) Author() (name, identifier string) { - return lg.ops[0].Name, lg.ops[0].AuthorID + return lg.Ops[0].Name, lg.Ops[0].AuthorID } // Name returns the human-readable name for this log, determined by the @@ -242,12 +243,12 @@ func (lg Log) Author() (name, identifier string) { // TODO (b5) - name must be made mutable by playing forward any name-changing // operations and applying them to the log func (lg Log) Name() string { - return lg.ops[0].Name + return lg.Ops[0].Name } // Child returns a child log for a given name, and nil if it doesn't exist func (lg Log) Child(name string) *Log { - for _, l := range lg.logs { + for _, l := range lg.Logs { if l.Name() == name { return l } @@ -257,12 +258,12 @@ func (lg Log) Child(name string) *Log { // AddChild appends a log as a direct descendant of this log func (lg *Log) AddChild(l *Log) { - lg.logs = append(lg.logs, l) + lg.Logs = append(lg.Logs, l) } // Verify confirms that the signature for a log matches func (lg Log) Verify(pub crypto.PubKey) error { - ok, err := pub.Verify(lg.SigningBytes(), lg.signature) + ok, err := pub.Verify(lg.SigningBytes(), lg.Signature) if err != nil { return err } @@ -272,17 +273,12 @@ func (lg Log) Verify(pub crypto.PubKey) error { return nil } -// Ops gives the set of operations in a log -func (lg Log) Ops() []Op { - return lg.ops -} - // Sign assigns the log signature by signing the logging checksum with a given // private key // TODO (b5) - this is assuming the log is authored by this private key. 
as soon // as we add collaborators, this won't be true func (lg *Log) Sign(pk crypto.PrivKey) (err error) { - lg.signature, err = pk.Sign(lg.SigningBytes()) + lg.Signature, err = pk.Sign(lg.SigningBytes()) if err != nil { return err } @@ -293,7 +289,7 @@ func (lg *Log) Sign(pk crypto.PrivKey) (err error) { // SigningBytes perpares a byte slice for signing from a log's operations func (lg Log) SigningBytes() []byte { hasher := md5.New() - for _, op := range lg.ops { + for _, op := range lg.Ops { hasher.Write([]byte(op.Ref)) } return hasher.Sum(nil) @@ -315,9 +311,9 @@ func (lg Log) SignedFlatbufferBytes(pk crypto.PrivKey) ([]byte, error) { // offset func (lg Log) MarshalFlatbuffer(builder *flatbuffers.Builder) flatbuffers.UOffsetT { // build logs bottom up, collecting offsets - logcount := len(lg.logs) + logcount := len(lg.Logs) logoffsets := make([]flatbuffers.UOffsetT, logcount) - for i, o := range lg.logs { + for i, o := range lg.Logs { logoffsets[i] = o.MarshalFlatbuffer(builder) } @@ -330,11 +326,11 @@ func (lg Log) MarshalFlatbuffer(builder *flatbuffers.Builder) flatbuffers.UOffse namestr, idstr := lg.Author() name := builder.CreateString(namestr) id := builder.CreateString(idstr) - signature := builder.CreateByteString(lg.signature) + signature := builder.CreateByteString(lg.Signature) - count := len(lg.ops) + count := len(lg.Ops) offsets := make([]flatbuffers.UOffsetT, count) - for i, o := range lg.ops { + for i, o := range lg.Ops { offsets[i] = o.MarshalFlatbuffer(builder) } @@ -353,29 +349,35 @@ func (lg Log) MarshalFlatbuffer(builder *flatbuffers.Builder) flatbuffers.UOffse return logfb.LogEnd(builder) } -// UnmarshalFlatbuffer reads a Log from +// UnmarshalFlatbufferBytes is a convenince wrapper to deserialze a flatbuffer +// slice into a log +func (lg *Log) UnmarshalFlatbufferBytes(data []byte) error { + return lg.UnmarshalFlatbuffer(logfb.GetRootAsLog(data, 0)) +} + +// UnmarshalFlatbuffer populates a logfb.Log from a Log pointer func (lg *Log) UnmarshalFlatbuffer(lfb *logfb.Log) (err error) { newLg := Log{} if len(lfb.Signature()) != 0 { - newLg.signature = lfb.Signature() + newLg.Signature = lfb.Signature() } - newLg.ops = make([]Op, lfb.OpsetLength()) + newLg.Ops = make([]Op, lfb.OpsetLength()) opfb := &logfb.Operation{} for i := 0; i < lfb.OpsetLength(); i++ { if lfb.Opset(opfb, i) { - newLg.ops[i] = UnmarshalOpFlatbuffer(opfb) + newLg.Ops[i] = UnmarshalOpFlatbuffer(opfb) } } if lfb.LogsLength() > 0 { - newLg.logs = make([]*Log, lfb.LogsLength()) + newLg.Logs = make([]*Log, lfb.LogsLength()) childfb := &logfb.Log{} for i := 0; i < lfb.LogsLength(); i++ { if lfb.Logs(childfb, i) { - newLg.logs[i] = &Log{} - newLg.logs[i].UnmarshalFlatbuffer(childfb) + newLg.Logs[i] = &Log{} + newLg.Logs[i].UnmarshalFlatbuffer(childfb) } } } diff --git a/logbook/log/log_test.go b/logbook/log/log_test.go index f6dccf048..8f9a35adf 100644 --- a/logbook/log/log_test.go +++ b/logbook/log/log_test.go @@ -31,7 +31,7 @@ func TestBookFlatbuffer(t *testing.T) { Size: 2, Note: "note!", }) - log.signature = []byte{1, 2, 3} + log.Signature = []byte{1, 2, 3} log.AddChild(InitLog(Op{ Type: OpTypeInit, diff --git a/logbook/logbook.go b/logbook/logbook.go index 5c953c919..f9a1e3551 100644 --- a/logbook/logbook.go +++ b/logbook/logbook.go @@ -24,7 +24,8 @@ import ( var ( // ErrNotFound is a sentinel error for data not found in a logbook - ErrNotFound = fmt.Errorf("logbook: not found") + ErrNotFound = fmt.Errorf("logbook: not found") + newTimestamp = func() time.Time { return time.Now() } ) const ( @@ -36,6 
+37,25 @@ const ( cronJobModel uint32 = 0x0006 ) +func modelString(m uint32) string { + switch m { + case userModel: + return "user" + case nameModel: + return "name" + case versionModel: + return "version" + case publicationModel: + return "publication" + case aclModel: + return "acl" + case cronJobModel: + return "cronJob" + default: + return "" + } +} + // Book wraps a log.Book with a higher-order API specific to Qri type Book struct { bk *log.Book @@ -87,16 +107,26 @@ func NewBook(pk crypto.PrivKey, username string, fs qfs.Filesystem, location str } func (book *Book) initialize(ctx context.Context) error { - // initialize author namespace - l := log.InitLog(log.Op{ + // initialize author's log of user actions + userActions := log.InitLog(log.Op{ Type: log.OpTypeInit, Model: userModel, Name: book.bk.AuthorName(), AuthorID: book.bk.AuthorID(), - Timestamp: time.Now().UnixNano(), + Timestamp: newTimestamp().UnixNano(), + }) + book.bk.AppendLog(userActions) + + // initialize author's namespace + ns := log.InitLog(log.Op{ + Type: log.OpTypeInit, + Model: nameModel, + Name: book.bk.AuthorName(), + AuthorID: book.bk.AuthorID(), + Timestamp: newTimestamp().UnixNano(), }) + book.bk.AppendLog(ns) - book.bk.AppendLog(l) return book.Save(ctx) } @@ -111,7 +141,7 @@ func (book Book) DeleteAuthor() error { } // Save writes the book to book.location -func (book Book) Save(ctx context.Context) error { +func (book *Book) Save(ctx context.Context) error { ciphertext, err := book.bk.FlatbufferCipher() if err != nil { return err @@ -153,21 +183,22 @@ func (book Book) initName(ctx context.Context, name string) *log.Log { Model: nameModel, AuthorID: book.bk.AuthorID(), Name: name, - Timestamp: time.Now().UnixNano(), + Timestamp: newTimestamp().UnixNano(), }) ns := book.authorNamespace() - ns.AddChild(lg) + ns.Logs = append(ns.Logs, lg) return lg } func (book Book) authorNamespace() *log.Log { - for _, l := range book.bk.ModelLogs(userModel) { + for _, l := range book.bk.ModelLogs(nameModel) { if l.Name() == book.bk.AuthorName() { return l } } // this should never happen in practice + // TODO (b5): create an author namespace on the spot if this happens return nil } @@ -183,22 +214,15 @@ func (book Book) WriteNameAmend(ctx context.Context, ref dsref.Ref, newName stri Type: log.OpTypeAmend, Model: nameModel, Name: newName, - Timestamp: time.Now().UnixNano(), + Timestamp: newTimestamp().UnixNano(), }) - return nil } // WriteVersionSave adds an operation to a log marking the creation of a // dataset version. 
Book will copy details from the provided dataset pointer func (book Book) WriteVersionSave(ctx context.Context, ds *dataset.Dataset) error { - ref := dsref.Ref{ - Username: ds.Peername, - ProfileID: ds.ProfileID, - Name: ds.Name, - Path: ds.Path, - } - + ref := refFromDataset(ds) l, err := book.readRefLog(ref) if err != nil { if err == ErrNotFound { @@ -221,8 +245,8 @@ func (book Book) WriteVersionSave(ctx context.Context, ds *dataset.Dataset) erro } // WriteVersionAmend adds an operation to a log amending a dataset version -func (book Book) WriteVersionAmend(ctx context.Context, ref dsref.Ref, ds *dataset.Dataset) error { - l, err := book.readRefLog(ref) +func (book Book) WriteVersionAmend(ctx context.Context, ds *dataset.Dataset) error { + l, err := book.readRefLog(refFromDataset(ds)) if err != nil { return err } @@ -262,8 +286,7 @@ func (book Book) WriteVersionDelete(ctx context.Context, ref dsref.Ref, revision func (book Book) WritePublish(ctx context.Context, ref dsref.Ref, revisions int, destinations ...string) error { l, err := book.readRefLog(ref) if err != nil { - return fmt.Errorf("%#v", book.bk.ModelLogs(nameModel)[0]) - // return err + return err } l.Append(log.Op{ @@ -304,7 +327,7 @@ func (book Book) WriteCronJobRan(ctx context.Context, number int64, ref dsref.Re } l.Append(log.Op{ - Type: log.OpTypeRemove, + Type: log.OpTypeInit, Model: cronJobModel, Size: uint64(number), // TODO (b5) - finish @@ -313,22 +336,6 @@ func (book Book) WriteCronJobRan(ctx context.Context, number int64, ref dsref.Re return book.Save(ctx) } -// Author represents the author at a point in time -type Author struct { - Username string - ID string - PublicKey string -} - -// Author plays forward the current author's operation log to determine the -// latest author state -func (book Book) Author(username string) (Author, error) { - a := Author{ - Username: "", - } - return a, nil -} - // LogBytes gets signed bytes suitable for sending as a network request. 
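// the returned flatbuffer can be decoded on the receiving side with
// log.Log.UnmarshalFlatbufferBytes (see TestLogTransfer for an example).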
// keep in mind that logs should never be sent to someone who does not have // proper permission to be disclosed log details @@ -350,13 +357,13 @@ func (book Book) Versions(ref dsref.Ref, offset, limit int) ([]dsref.Info, error } refs := []dsref.Info{} - for _, op := range l.Ops() { + for _, op := range l.Ops { if op.Model == versionModel { switch op.Type { case log.OpTypeInit: - refs = append(refs, book.infoFromOp(ref, op)) + refs = append(refs, infoFromOp(ref, op)) case log.OpTypeAmend: - refs[len(refs)-1] = book.infoFromOp(ref, op) + refs[len(refs)-1] = infoFromOp(ref, op) case log.OpTypeRemove: refs = refs[:len(refs)-int(op.Size)] } @@ -366,30 +373,172 @@ func (book Book) Versions(ref dsref.Ref, offset, limit int) ([]dsref.Info, error return refs, nil } -func (book Book) infoFromOp(ref dsref.Ref, op log.Op) dsref.Info { +// LogEntry is a simplified representation of a log operation +type LogEntry struct { + Timestamp time.Time + Author string + Action string + Note string +} + +// String formats a LogEntry as a String +func (l LogEntry) String() string { + return fmt.Sprintf("%s\t%s\t%s\t%s", l.Timestamp.Format(time.Kitchen), l.Author, l.Action, l.Note) +} + +// Logs returns +func (book Book) Logs(ref dsref.Ref, offset, limit int) ([]LogEntry, error) { + l, err := book.readRefLog(ref) + if err != nil { + return nil, err + } + + res := []LogEntry{} + for _, op := range l.Ops { + if offset > 0 { + offset-- + continue + } + res = append(res, logEntryFromOp(ref.Username, op)) + if len(res) == limit { + break + } + } + + return res, nil +} + +var actionStrings = map[uint32][3]string{ + userModel: [3]string{"create profile", "update profile", "delete profile"}, + nameModel: [3]string{"init", "rename", "delete"}, + versionModel: [3]string{"save", "amend", "remove"}, + publicationModel: [3]string{"publish", "", "unpublish"}, + aclModel: [3]string{"update access", "update access", ""}, + cronJobModel: [3]string{"ran update", "", ""}, +} + +func logEntryFromOp(author string, op log.Op) LogEntry { + return LogEntry{ + Timestamp: time.Unix(0, op.Timestamp), + Author: author, + Action: actionStrings[op.Model][int(op.Type)-1], + Note: op.Note, + } +} + +// RawLogs returns a serialized, complete set of logs keyed by model type +// logs. Most +func (book Book) RawLogs() map[string][]Log { + logs := map[string][]Log{} + for m, lgs := range book.bk.Logs() { + ls := make([]Log, len(lgs)) + for i, l := range lgs { + ls[i] = newLog(l) + } + logs[modelString(m)] = ls + } + return logs +} + +// Log is a human-oriented representation of log.Log intended for serialization +type Log struct { + Ops []Op `json:"ops,omitempty"` + Logs []Log `json:"logs,omitempty"` +} + +func newLog(lg *log.Log) Log { + ops := make([]Op, len(lg.Ops)) + for i, o := range lg.Ops { + ops[i] = newOp(o) + } + + var ls []Log + if len(lg.Logs) > 0 { + ls = make([]Log, len(lg.Logs)) + for i, l := range lg.Logs { + ls[i] = newLog(l) + } + } + + return Log{ + Ops: ops, + Logs: ls, + } +} + +// Op is a human-oriented representation of log.Op intended for serialization +type Op struct { + // type of operation + Type string `json:"type,omitempty"` + // data model to operate on + Model string `json:"model,omitempty"` + // identifier of data this operation is documenting + Ref string `json:"ref,omitempty"` + // previous reference in a causal history + Prev string `json:"prev,omitempty"` + // references this operation relates to. 
usage is operation type-dependant + Relations []string `json:"relations,omitempty"` + // human-readable name for the reference + Name string `json:"name,omitempty"` + // identifier for author + AuthorID string `json:"authorID,omitempty"` + // operation timestamp, for annotation purposes only + Timestamp time.Time `json:"timestamp,omitempty"` + // size of the referenced value in bytes + Size uint64 `json:"size,omitempty"` + // operation annotation for users. eg: commit title + Note string `json:"note,omitempty"` +} + +func newOp(op log.Op) Op { + return Op{ + Type: opTypeString(op.Type), + Model: modelString(op.Model), + Ref: op.Ref, + Prev: op.Prev, + Relations: op.Relations, + Name: op.Name, + AuthorID: op.AuthorID, + Timestamp: time.Unix(0, op.Timestamp), + Size: op.Size, + Note: op.Note, + } +} + +func opTypeString(op log.OpType) string { + switch op { + case log.OpTypeInit: + return "init" + case log.OpTypeAmend: + return "amend" + case log.OpTypeRemove: + return "remove" + default: + return "" + } +} + +func refFromDataset(ds *dataset.Dataset) dsref.Ref { + return dsref.Ref{ + Username: ds.Peername, + ProfileID: ds.ProfileID, + Name: ds.Name, + Path: ds.Path, + } +} + +func infoFromOp(ref dsref.Ref, op log.Op) dsref.Info { return dsref.Info{ Ref: dsref.Ref{ Username: ref.Username, Name: ref.Name, Path: op.Ref, }, - Timestamp: time.Unix(op.Timestamp, op.Timestamp), + Timestamp: time.Unix(0, op.Timestamp), CommitTitle: op.Note, } } -// ACL represents an access control list. ACL is a work in progress, not fully -// implemented -// TODO (b5) - the real version of this struct will come from a different -// package -type ACL struct { -} - -// ACL is a control list -func (book Book) ACL(alias string) (ACL, error) { - return ACL{}, fmt.Errorf("not finished") -} - func (book Book) readRefLog(ref dsref.Ref) (*log.Log, error) { if ref.Username == "" { return nil, fmt.Errorf("ref.Peername is required") @@ -414,12 +563,12 @@ func (book Book) readRefLog(ref dsref.Ref) (*log.Log, error) { func calcProfileID(privKey crypto.PrivKey) (string, error) { pubkeybytes, err := privKey.GetPublic().Bytes() if err != nil { - return "", fmt.Errorf("error getting pubkey bytes: %s", err.Error()) + return "", fmt.Errorf("getting pubkey bytes: %s", err.Error()) } mh, err := multihash.Sum(pubkeybytes, multihash.SHA2_256, 32) if err != nil { - return "", fmt.Errorf("error summing pubkey: %s", err.Error()) + return "", fmt.Errorf("summing pubkey: %s", err.Error()) } return mh.B58String(), nil diff --git a/logbook/logbook_test.go b/logbook/logbook_test.go index d9e7b7458..39a92c342 100644 --- a/logbook/logbook_test.go +++ b/logbook/logbook_test.go @@ -7,10 +7,12 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" crypto "github.com/libp2p/go-libp2p-crypto" "github.com/qri-io/dataset" "github.com/qri-io/qfs" "github.com/qri-io/qri/dsref" + "github.com/qri-io/qri/logbook/log" ) func Example() { @@ -87,7 +89,7 @@ func Example() { Length: 100, }, Path: "QmHashOfVersion2", - PreviousPath: "", + PreviousPath: "QmHashOfVersion1", } // once again, write to the log @@ -155,39 +157,405 @@ func Example() { // b5/world_bank_population@QmHashOfVersion3 } -type testRunner struct { - ctx context.Context - Username string +func TestNewBook(t *testing.T) { + pk := testPrivKey(t) + fs := qfs.NewMemFS() + + if _, err := NewBook(nil, "b5", nil, "/mem/logset"); err == nil { + t.Errorf("expected missing private key arg to error") + } + if _, err := NewBook(pk, "", nil, "/mem/logset"); err == nil { + t.Errorf("expected missing 
author arg to error") + } + if _, err := NewBook(pk, "b5", nil, "/mem/logset"); err == nil { + t.Errorf("expected missing filesystem arg to error") + } + if _, err := NewBook(pk, "b5", fs, ""); err == nil { + t.Errorf("expected missing location arg to error") + } + + book, err := NewBook(pk, "b5", fs, "/mem/logset") + if err != nil { + t.Fatal(err) + } + + if err := book.Load(context.Background()); err != nil { + t.Fatal(err) + } } -func newTestRunner(t *testing.T) (tr *testRunner, cleanup func()) { - return &testRunner{}, func() {} +func TestBookLog(t *testing.T) { + tr, cleanup := newTestRunner(t) + defer cleanup() + + tr.WriteWorldBankExample(t) + + entries, err := tr.Book.Logs(tr.WorldBankRef(), 0, 30) + if err != nil { + t.Fatal(err) + } + + got := make([]string, len(entries)) + for i, entry := range entries { + // convert timestamps to UTC for consistent output + entry.Timestamp = entry.Timestamp.UTC() + got[i] = entry.String() + t.Log(got[i]) + } + + expect := []string{ + "10:07PM\ttest_author\tinit\t", + "12:00AM\ttest_author\tsave\tinitial commit", + "12:00AM\ttest_author\tsave\tadded body data", + "12:00AM\ttest_author\tpublish\t", + "12:00AM\ttest_author\tunpublish\t", + "12:00AM\ttest_author\tremove\t", + "12:00AM\ttest_author\tamend\tadded meta info", + } + + if diff := cmp.Diff(expect, got); diff != "" { + t.Errorf("result mismatch (-want +got):\n%s", diff) + } } -// func TestLogInit(t *testing.T) { -// } +func TestBookRawLog(t *testing.T) { + tr, cleanup := newTestRunner(t) + defer cleanup() -func TestNewBook(t *testing.T) { - testPk := `CAASpgkwggSiAgEAAoIBAQC/7Q7fILQ8hc9g07a4HAiDKE4FahzL2eO8OlB1K99Ad4L1zc2dCg+gDVuGwdbOC29IngMA7O3UXijycckOSChgFyW3PafXoBF8Zg9MRBDIBo0lXRhW4TrVytm4Etzp4pQMyTeRYyWR8e2hGXeHArXM1R/A/SjzZUbjJYHhgvEE4OZy7WpcYcW6K3qqBGOU5GDMPuCcJWac2NgXzw6JeNsZuTimfVCJHupqG/dLPMnBOypR22dO7yJIaQ3d0PFLxiDG84X9YupF914RzJlopfdcuipI+6gFAgBw3vi6gbECEzcohjKf/4nqBOEvCDD6SXfl5F/MxoHurbGBYB2CJp+FAgMBAAECggEAaVOxe6Y5A5XzrxHBDtzjlwcBels3nm/fWScvjH4dMQXlavwcwPgKhy2NczDhr4X69oEw6Msd4hQiqJrlWd8juUg6vIsrl1wS/JAOCS65fuyJfV3Pw64rWbTPMwO3FOvxj+rFghZFQgjg/i45uHA2UUkM+h504M5Nzs6Arr/rgV7uPGR5e5OBw3lfiS9ZaA7QZiOq7sMy1L0qD49YO1ojqWu3b7UaMaBQx1Dty7b5IVOSYG+Y3U/dLjhTj4Hg1VtCHWRm3nMOE9cVpMJRhRzKhkq6gnZmni8obz2BBDF02X34oQLcHC/Wn8F3E8RiBjZDI66g+iZeCCUXvYz0vxWAQQKBgQDEJu6flyHPvyBPAC4EOxZAw0zh6SF/r8VgjbKO3n/8d+kZJeVmYnbsLodIEEyXQnr35o2CLqhCvR2kstsRSfRz79nMIt6aPWuwYkXNHQGE8rnCxxyJmxV4S63GczLk7SIn4KmqPlCI08AU0TXJS3zwh7O6e6kBljjPt1mnMgvr3QKBgQD6fAkdI0FRZSXwzygx4uSg47Co6X6ESZ9FDf6ph63lvSK5/eue/ugX6p/olMYq5CHXbLpgM4EJYdRfrH6pwqtBwUJhlh1xI6C48nonnw+oh8YPlFCDLxNG4tq6JVo071qH6CFXCIank3ThZeW5a3ZSe5pBZ8h4bUZ9H8pJL4C7yQKBgFb8SN/+/qCJSoOeOcnohhLMSSD56MAeK7KIxAF1jF5isr1TP+rqiYBtldKQX9bIRY3/8QslM7r88NNj+aAuIrjzSausXvkZedMrkXbHgS/7EAPflrkzTA8fyH10AsLgoj/68mKr5bz34nuY13hgAJUOKNbvFeC9RI5g6eIqYH0FAoGAVqFTXZp12rrK1nAvDKHWRLa6wJCQyxvTU8S1UNi2EgDJ492oAgNTLgJdb8kUiH0CH0lhZCgr9py5IKW94OSM6l72oF2UrS6PRafHC7D9b2IV5Al9lwFO/3MyBrMocapeeyaTcVBnkclz4Qim3OwHrhtFjF1ifhP9DwVRpuIg+dECgYANwlHxLe//tr6BM31PUUrOxP5Y/cj+ydxqM/z6papZFkK6Mvi/vMQQNQkh95GH9zqyC5Z/yLxur4ry1eNYty/9FnuZRAkEmlUSZ/DobhU0Pmj8Hep6JsTuMutref6vCk2n02jc9qYmJuD7iXkdXDSawbEG6f5C4MUkJ38z1t1OjA==` - data, err := base64.StdEncoding.DecodeString(testPk) + tr.WriteWorldBankExample(t) + + got := tr.Book.RawLogs() + + // data, err := json.MarshalIndent(got, "", " ") + // if err != nil { + // t.Fatal(err) + // } + // t.Logf("%s", string(data)) + + expect := map[string][]Log{ + "name": []Log{ + { + Ops: []Op{ + { + Type: "init", + Model: "name", + Name: "test_author", + 
AuthorID: "QmZePf5LeXow3RW5U1AgEiNbW46YnRGhZ7HPvm1UmPFPwt", + Timestamp: mustTime("2047-03-18T17:07:12.45224192-05:00"), + }, + }, + Logs: []Log{ + { + Ops: []Op{ + { + Type: "init", + Model: "name", + Name: "world_bank_population", + AuthorID: "QmZePf5LeXow3RW5U1AgEiNbW46YnRGhZ7HPvm1UmPFPwt", + Timestamp: mustTime("2047-03-18T17:07:13.45224192-05:00"), + }, + { + Type: "init", + Model: "version", + Ref: "QmHashOfVersion1", + Timestamp: mustTime("1969-12-31T19:00:00-05:00"), + Note: "initial commit", + }, + { + Type: "init", + Model: "version", + Ref: "QmHashOfVersion2", + Prev: "QmHashOfVersion1", + Timestamp: mustTime("1969-12-31T19:00:00-05:00"), + Note: "added body data", + }, + { + Type: "init", + Model: "publication", + Relations: []string{ + "registry.qri.cloud", + }, + Timestamp: mustTime("1969-12-31T19:00:00-05:00"), + Size: 2, + }, + { + Type: "remove", + Model: "publication", + Relations: []string{"registry.qri.cloud"}, + Timestamp: mustTime("1969-12-31T19:00:00-05:00"), + Size: 2, + }, + { + Type: "remove", + Model: "version", + Timestamp: mustTime("1969-12-31T19:00:00-05:00"), + Size: 1, + }, + { + Type: "amend", + Model: "version", + Ref: "QmHashOfVersion3", + Prev: "QmHashOfVersion1", + Timestamp: mustTime("1969-12-31T19:00:00-05:00"), + Note: "added meta info", + }, + }, + }, + }, + }, + }, + "user": []Log{ + { + Ops: []Op{ + { + Type: "init", + Model: "user", + Name: "test_author", + AuthorID: "QmZePf5LeXow3RW5U1AgEiNbW46YnRGhZ7HPvm1UmPFPwt", + Timestamp: mustTime("2047-03-18T17:07:11.45224192-05:00"), + }, + }, + }, + }, + } + + if diff := cmp.Diff(expect, got); diff != "" { + t.Errorf("result mismatch (-want +got):\n%s", diff) + } +} + +func TestLogTransfer(t *testing.T) { + tr, cleanup := newTestRunner(t) + defer cleanup() + + tr.WriteWorldBankExample(t) + + data, err := tr.Book.LogBytes(tr.WorldBankRef()) + if err != nil { + t.Error(err) + } + + got := &log.Log{} + if err := got.UnmarshalFlatbufferBytes(data); err != nil { + t.Error(err) + } + + // TODO (b5) - create a second book & load it. 
Don't have an API for this yet +} + +func TestRenameDataset(t *testing.T) { + tr, cleanup := newTestRunner(t) + defer cleanup() + + tr.WriteRenameExample(t) + + // TODO (b5) - finish: + // if _, err := tr.Book.Logs(tr.RenameInitialRef(), 0, 30); err == nil { + // t.Error("expected fetching renamed dataset to error") + // } + + // entries, err := tr.Book.Logs(tr.RenameRef(), 0, 30) + entries, err := tr.Book.Logs(tr.RenameInitialRef(), 0, 30) if err != nil { t.Fatal(err) } - pk, err := crypto.UnmarshalPrivateKey(data) + + got := make([]string, len(entries)) + for i, entry := range entries { + // convert timestamps to UTC for consistent output + entry.Timestamp = entry.Timestamp.UTC() + got[i] = entry.String() + t.Log(got[i]) + } + + expect := []string{ + "10:07PM\ttest_author\tinit\t", + "12:00AM\ttest_author\tsave\tinitial commit", + "12:00AM\ttest_author\tran update\t", + "12:00AM\ttest_author\tsave\tadded meta info", + "10:07PM\ttest_author\trename\t", + } + + if diff := cmp.Diff(expect, got); diff != "" { + t.Errorf("result mismatch (-want +got):\n%s", diff) + } +} + +func mustTime(str string) time.Time { + t, err := time.Parse(time.RFC3339, str) if err != nil { - t.Fatalf("error unmarshaling private key: %s", err.Error()) + panic(err) } + return t +} + +type testRunner struct { + Ctx context.Context + Username string + Book *Book + Fs qfs.Filesystem + Pk crypto.PrivKey + Tick int +} +func newTestRunner(t *testing.T) (tr *testRunner, cleanup func()) { + ctx := context.Background() + authorName := "test_author" + pk := testPrivKey(t) fs := qfs.NewMemFS() - book, err := NewBook(pk, "b5", fs, "/mem/logset") + prevTs := newTimestamp + tr = &testRunner{ + Ctx: ctx, + Username: authorName, + } + newTimestamp = tr.newTimestamp + + var err error + tr.Book, err = NewBook(pk, authorName, fs, "/mem/logset") if err != nil { + t.Fatalf("creating book: %s", err.Error()) + } + + cleanup = func() { + newTimestamp = prevTs + } + + return tr, cleanup +} + +func (tr *testRunner) newTimestamp() time.Time { + defer func() { tr.Tick++ }() + return time.Unix(int64(94670280000+tr.Tick), 0) +} + +func (tr *testRunner) WorldBankRef() dsref.Ref { + return dsref.Ref{Username: tr.Username, Name: "world_bank_population"} +} + +func (tr *testRunner) WriteWorldBankExample(t *testing.T) { + book := tr.Book + name := "world_bank_population" + + if err := book.WriteNameInit(tr.Ctx, name); err != nil { + panic(err) + } + + // pretend we've just created a dataset, these are the only fields the log + // will care about + ds := &dataset.Dataset{ + Peername: tr.Username, + Name: name, + Commit: &dataset.Commit{ + Timestamp: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), + Title: "initial commit", + }, + Path: "QmHashOfVersion1", + PreviousPath: "", + } + + if err := book.WriteVersionSave(tr.Ctx, ds); err != nil { + panic(err) + } + + // sometime later, we create another version + ds.Commit = &dataset.Commit{ + Timestamp: time.Date(2000, time.January, 2, 0, 0, 0, 0, time.UTC), + Title: "added body data", + } + ds.Path = "QmHashOfVersion2" + ds.PreviousPath = "QmHashOfVersion1" + + if err := book.WriteVersionSave(tr.Ctx, ds); err != nil { t.Fatal(err) } - author, err := book.Author("b5") - if err != nil { + ref := dsref.Ref{Username: tr.Username, Name: name} + if err := book.WritePublish(tr.Ctx, ref, 2, "registry.qri.cloud"); err != nil { + t.Fatal(err) + } + + if err := book.WriteUnpublish(tr.Ctx, ref, 2, "registry.qri.cloud"); err != nil { + t.Fatal(err) + } + + book.WriteVersionDelete(tr.Ctx, ref, 1) + + 
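+	// finally, amend the head of the log: write a corrected version that
+	// records QmHashOfVersion1 as its predecessor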
ds.Commit.Timestamp = time.Date(2000, time.January, 3, 0, 0, 0, 0, time.UTC) + ds.Commit.Title = "added meta info" + ds.Path = "QmHashOfVersion3" + ds.PreviousPath = "QmHashOfVersion1" + + if err := book.WriteVersionAmend(tr.Ctx, ds); err != nil { t.Fatal(err) } +} + +func (tr *testRunner) RenameInitialRef() dsref.Ref { + return dsref.Ref{Username: tr.Book.bk.AuthorName(), Name: "dataset"} +} + +func (tr *testRunner) RenameRef() dsref.Ref { + return dsref.Ref{Username: tr.Book.bk.AuthorName(), Name: "renamed_dataset"} +} + +func (tr *testRunner) WriteRenameExample(t *testing.T) { + book := tr.Book + name := "dataset" + rename := "renamed_dataset" - t.Logf("%v", author) + if err := book.WriteNameInit(tr.Ctx, name); err != nil { + panic(err) + } + + // pretend we've just created a dataset, these are the only fields the log + // will care about + ds := &dataset.Dataset{ + Peername: tr.Username, + Name: name, + Commit: &dataset.Commit{ + Timestamp: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC), + Title: "initial commit", + }, + Path: "QmHashOfVersion1", + PreviousPath: "", + } + + if err := book.WriteVersionSave(tr.Ctx, ds); err != nil { + panic(err) + } + + ds.Commit.Timestamp = time.Date(2000, time.January, 3, 0, 0, 0, 0, time.UTC) + ds.Commit.Title = "added meta info" + ds.Path = "QmHashOfVersion2" + ds.PreviousPath = "QmHashOfVersion1" + + // pretend we ran a cron job that created this version + ref := dsref.Ref{Username: book.bk.AuthorName(), Name: name} + if err := book.WriteCronJobRan(tr.Ctx, 1, ref); err != nil { + t.Fatal(err) + } + + if err := book.WriteVersionSave(tr.Ctx, ds); err != nil { + t.Fatal(err) + } + + if err := book.WriteNameAmend(tr.Ctx, ref, rename); err != nil { + t.Fatal(err) + } +} + +func testPrivKey(t *testing.T) crypto.PrivKey { + // logbooks are encrypted at rest, we need a private key to interact with + // them, including to create a new logbook. This is a dummy Private Key + // you should never, ever use in real life. demo only folks. 
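+	// in real code you would generate a fresh key instead, for example:
+	//   pk, _, err := crypto.GenerateKeyPair(crypto.RSA, 2048)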
+ testPk := `CAASpgkwggSiAgEAAoIBAQC/7Q7fILQ8hc9g07a4HAiDKE4FahzL2eO8OlB1K99Ad4L1zc2dCg+gDVuGwdbOC29IngMA7O3UXijycckOSChgFyW3PafXoBF8Zg9MRBDIBo0lXRhW4TrVytm4Etzp4pQMyTeRYyWR8e2hGXeHArXM1R/A/SjzZUbjJYHhgvEE4OZy7WpcYcW6K3qqBGOU5GDMPuCcJWac2NgXzw6JeNsZuTimfVCJHupqG/dLPMnBOypR22dO7yJIaQ3d0PFLxiDG84X9YupF914RzJlopfdcuipI+6gFAgBw3vi6gbECEzcohjKf/4nqBOEvCDD6SXfl5F/MxoHurbGBYB2CJp+FAgMBAAECggEAaVOxe6Y5A5XzrxHBDtzjlwcBels3nm/fWScvjH4dMQXlavwcwPgKhy2NczDhr4X69oEw6Msd4hQiqJrlWd8juUg6vIsrl1wS/JAOCS65fuyJfV3Pw64rWbTPMwO3FOvxj+rFghZFQgjg/i45uHA2UUkM+h504M5Nzs6Arr/rgV7uPGR5e5OBw3lfiS9ZaA7QZiOq7sMy1L0qD49YO1ojqWu3b7UaMaBQx1Dty7b5IVOSYG+Y3U/dLjhTj4Hg1VtCHWRm3nMOE9cVpMJRhRzKhkq6gnZmni8obz2BBDF02X34oQLcHC/Wn8F3E8RiBjZDI66g+iZeCCUXvYz0vxWAQQKBgQDEJu6flyHPvyBPAC4EOxZAw0zh6SF/r8VgjbKO3n/8d+kZJeVmYnbsLodIEEyXQnr35o2CLqhCvR2kstsRSfRz79nMIt6aPWuwYkXNHQGE8rnCxxyJmxV4S63GczLk7SIn4KmqPlCI08AU0TXJS3zwh7O6e6kBljjPt1mnMgvr3QKBgQD6fAkdI0FRZSXwzygx4uSg47Co6X6ESZ9FDf6ph63lvSK5/eue/ugX6p/olMYq5CHXbLpgM4EJYdRfrH6pwqtBwUJhlh1xI6C48nonnw+oh8YPlFCDLxNG4tq6JVo071qH6CFXCIank3ThZeW5a3ZSe5pBZ8h4bUZ9H8pJL4C7yQKBgFb8SN/+/qCJSoOeOcnohhLMSSD56MAeK7KIxAF1jF5isr1TP+rqiYBtldKQX9bIRY3/8QslM7r88NNj+aAuIrjzSausXvkZedMrkXbHgS/7EAPflrkzTA8fyH10AsLgoj/68mKr5bz34nuY13hgAJUOKNbvFeC9RI5g6eIqYH0FAoGAVqFTXZp12rrK1nAvDKHWRLa6wJCQyxvTU8S1UNi2EgDJ492oAgNTLgJdb8kUiH0CH0lhZCgr9py5IKW94OSM6l72oF2UrS6PRafHC7D9b2IV5Al9lwFO/3MyBrMocapeeyaTcVBnkclz4Qim3OwHrhtFjF1ifhP9DwVRpuIg+dECgYANwlHxLe//tr6BM31PUUrOxP5Y/cj+ydxqM/z6papZFkK6Mvi/vMQQNQkh95GH9zqyC5Z/yLxur4ry1eNYty/9FnuZRAkEmlUSZ/DobhU0Pmj8Hep6JsTuMutref6vCk2n02jc9qYmJuD7iXkdXDSawbEG6f5C4MUkJ38z1t1OjA==` + data, err := base64.StdEncoding.DecodeString(testPk) + if err != nil { + t.Fatal(err) + } + pk, err := crypto.UnmarshalPrivateKey(data) + if err != nil { + t.Fatalf("error unmarshaling private key: %s", err.Error()) + } + return pk }
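
Reviewer note: the sketch below pulls the pieces of this patch together, showing how the new
inspection methods (Book.Logs, Book.RawLogs) and the log.Log.UnmarshalFlatbufferBytes helper
are intended to be used. It is a minimal usage sketch, not part of the patch: the key
generation call, the in-memory filesystem, the "/mem/logset" location, and the sample dataset
values are stand-ins taken from the tests, import paths follow the ones used in the patch, and
error handling is reduced to panics for brevity.

package main

import (
	"context"
	"fmt"
	"time"

	crypto "github.com/libp2p/go-libp2p-crypto"
	"github.com/qri-io/dataset"
	"github.com/qri-io/qfs"
	"github.com/qri-io/qri/dsref"
	"github.com/qri-io/qri/logbook"
	"github.com/qri-io/qri/logbook/log"
)

func main() {
	ctx := context.Background()

	// a throwaway key; real callers load the author's key from their repo
	pk, _, err := crypto.GenerateKeyPair(crypto.RSA, 2048)
	if err != nil {
		panic(err)
	}

	// logbooks are encrypted at rest, so the book needs the private key
	book, err := logbook.NewBook(pk, "b5", qfs.NewMemFS(), "/mem/logset")
	if err != nil {
		panic(err)
	}

	// record a dataset initialization and a first saved version
	if err := book.WriteNameInit(ctx, "world_bank_population"); err != nil {
		panic(err)
	}
	ds := &dataset.Dataset{
		Peername: "b5",
		Name:     "world_bank_population",
		Commit: &dataset.Commit{
			Timestamp: time.Now(),
			Title:     "initial commit",
		},
		Path: "QmHashOfVersion1",
	}
	if err := book.WriteVersionSave(ctx, ds); err != nil {
		panic(err)
	}

	// Logs lists human-readable entries for a single dataset reference
	ref := dsref.Ref{Username: "b5", Name: "world_bank_population"}
	entries, err := book.Logs(ref, 0, 10)
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		fmt.Println(e.String())
	}

	// RawLogs dumps every log in the book keyed by model name ("user",
	// "name", "version", ...) in a form that is easy to serialize as JSON
	for model, lgs := range book.RawLogs() {
		fmt.Println(model, len(lgs))
	}

	// LogBytes plus UnmarshalFlatbufferBytes round-trip a dataset log,
	// e.g. for sending it to another peer
	data, err := book.LogBytes(ref)
	if err != nil {
		panic(err)
	}
	received := &log.Log{}
	if err := received.UnmarshalFlatbufferBytes(data); err != nil {
		panic(err)
	}
	fmt.Println(received.Name())
}

The split shown here mirrors the two new representations in logbook.go: LogEntry for
human-oriented listings, and the exported Log/Op structs for serializing the complete raw
structure.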