diff --git a/actions/dataset.go b/actions/dataset.go
index ed40bc631..915c1a54e 100644
--- a/actions/dataset.go
+++ b/actions/dataset.go
@@ -12,6 +12,7 @@ import (
 	"github.com/qri-io/qfs"
 	"github.com/qri-io/qfs/cafs"
 	"github.com/qri-io/qri/base"
+	"github.com/qri-io/qri/logbook"
 	"github.com/qri-io/qri/p2p"
 	"github.com/qri-io/qri/remote"
 	"github.com/qri-io/qri/repo"
@@ -300,6 +301,10 @@ func ModifyDataset(node *p2p.QriNode, current, new *repo.DatasetRef, isRename bo
 	}
 	if isRename {
 		new.Path = current.Path
+
+		if err = r.Logbook().WriteNameAmend(context.TODO(), repo.ConvertToDsref(*current), new.Name); err != nil {
+			return err
+		}
 	}
 
 	if err = r.DeleteRef(*current); err != nil {
@@ -309,7 +314,6 @@ func ModifyDataset(node *p2p.QriNode, current, new *repo.DatasetRef, isRename bo
 		return err
 	}
 
-	// return r.LogEvent(repo.ETDsRenamed, *new)
 	return nil
 }
 
@@ -334,14 +338,14 @@ func DeleteDataset(ctx context.Context, node *p2p.QriNode, ref *repo.DatasetRef)
 	// TODO - this is causing bad things in our tests. For some reason core repo explodes with nil
 	// references when this is on and go test ./... is run from $GOPATH/github.com/qri-io/qri
 	// let's upgrade IPFS to the latest version & try again
-	// log, err := base.DatasetLog(r, *ref, 10000, 0, false)
-	// if err != nil {
-	// 	return err
-	// }
+	log, err := base.DatasetLog(ctx, r, *ref, 10000, 0, false)
+	if err != nil {
+		return err
+	}
 
 	// for _, ref := range log {
 	// 	time.Sleep(time.Millisecond * 50)
-	// 	if err = base.UnpinDataset(r, ref); err != nil {
+	// 	if err = base.UnpinDataset(r, ref); err != nil && err != repo.ErrNotPinner {
 	// 		return err
 	// 	}
 	// }
@@ -354,6 +358,10 @@ func DeleteDataset(ctx context.Context, node *p2p.QriNode, ref *repo.DatasetRef)
 		return err
 	}
 
-	// return r.LogEvent(repo.ETDsDeleted, *ref)
-	return nil
+	err = r.Logbook().WriteVersionDelete(ctx, repo.ConvertToDsref(*ref), len(log))
+	if err == logbook.ErrNotFound {
+		return nil
+	}
+
+	return err
 }
diff --git a/base/dataset.go b/base/dataset.go
index 8fcbe626e..3f80f06d4 100644
--- a/base/dataset.go
+++ b/base/dataset.go
@@ -208,12 +208,18 @@ func CreateDataset(ctx context.Context, r repo.Repo, streams ioes.IOStreams, ds,
 		Name:      ds.Name,
 		Path:      path,
 	}
+
 	if err = r.PutRef(ref); err != nil {
 		return
 	}
-	// if err = r.LogEvent(repo.ETDsCreated, ref); err != nil {
-	// 	return
-	// }
+
+	// TODO (b5): confirm these assignments happen in dsfs.CreateDataset with tests
+	ds.ProfileID = pro.ID.String()
+	ds.Peername = pro.Peername
+	ds.Path = path
+	if err = r.Logbook().WriteVersionSave(ctx, ds); err != nil {
+		return
+	}
 
 	if err = ReadDataset(ctx, r, &ref); err != nil {
 		return
@@ -321,7 +327,6 @@ func PinDataset(ctx context.Context, r repo.Repo, ref repo.DatasetRef) error {
 func UnpinDataset(ctx context.Context, r repo.Repo, ref repo.DatasetRef) error {
 	if pinner, ok := r.Store().(cafs.Pinner); ok {
 		return pinner.Unpin(ctx, ref.Path, true)
-		// return r.LogEvent(repo.ETDsUnpinned, ref)
 	}
 	return repo.ErrNotPinner
 }
diff --git a/lib/update.go b/lib/update.go
index 15139dbb9..9c893e475 100644
--- a/lib/update.go
+++ b/lib/update.go
@@ -332,8 +332,7 @@ func (m *UpdateMethods) Run(p *Job, res *repo.DatasetRef) (err error) {
 		err = m.runDatasetUpdate(ctx, params, res)
 
 	case cron.JTShellScript:
-		return update.JobToCmd(m.inst.streams, p).Run()
-
+		err = update.JobToCmd(m.inst.streams, p).Run()
 	case cron.JobType(""):
 		return fmt.Errorf("update requires a job type to run")
 	default:
@@ -344,9 +343,10 @@ func (m *UpdateMethods) Run(p *Job, res *repo.DatasetRef) (err error) {
 		return err
 	}
 
-	// TODO (b5): expand event logging interface to support storing additional details
-	// return m.inst.Repo().LogEvent(repo.ETCronJobRan, *res)
-	return nil
+	if p.RunError == "" {
+		err = m.inst.Repo().Logbook().WriteCronJobRan(ctx, p.RunNumber, repo.ConvertToDsref(*res))
+	}
+	return err
 }
 
 func absolutizeJobFilepaths(j *Job) error {
diff --git a/logbook/log/log.go b/logbook/log/log.go
index 074d9779e..d22ebc0a7 100644
--- a/logbook/log/log.go
+++ b/logbook/log/log.go
@@ -163,8 +163,10 @@ func (book Book) marshalFlatbuffer(builder *flatbuffers.Builder) flatbuffers.UOf
 
 func (book *Book) unmarshalFlatbuffer(b *logfb.Book) error {
 	newBook := Book{
-		id:   string(b.Identifier()),
-		logs: map[uint32][]*Log{},
+		pk:         book.pk,
+		authorname: string(b.Name()),
+		id:         string(b.Identifier()),
+		logs:       map[uint32][]*Log{},
 	}
 
 	count := b.LogsLength()
diff --git a/logbook/log/log_test.go b/logbook/log/log_test.go
index 9c9492314..f6dccf048 100644
--- a/logbook/log/log_test.go
+++ b/logbook/log/log_test.go
@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"context"
 	"encoding/base64"
-	"fmt"
 	"testing"
 
 	"github.com/google/go-cmp/cmp"
@@ -19,6 +18,7 @@ var allowUnexported = cmp.AllowUnexported(
 )
 
 func TestBookFlatbuffer(t *testing.T) {
+	pk := testPrivKey(t)
 	log := InitLog(Op{
 		Type:  OpTypeInit,
 		Model: 0x0001,
@@ -45,6 +45,8 @@ func TestBookFlatbuffer(t *testing.T) {
 	}))
 
 	book := &Book{
+		pk:         pk,
+		authorname: "must_preserve",
 		logs: map[uint32][]*Log{
 			0x0001: []*Log{log},
 		},
@@ -53,12 +55,16 @@ func TestBookFlatbuffer(t *testing.T) {
 	data := book.flatbufferBytes()
 	logsetfb := logfb.GetRootAsBook(data, 0)
 
-	got := &Book{}
+	got := &Book{
+		// re-provide private key, unmarshal flatbuffer must preserve this key
+		// through the unmarshaling call
+		pk: pk,
+	}
 	if err := got.unmarshalFlatbuffer(logsetfb); err != nil {
 		t.Fatalf("unmarshalling flatbuffer bytes: %s", err.Error())
 	}
 
-	if diff := cmp.Diff(book, got, allowUnexported); diff != "" {
+	if diff := cmp.Diff(book, got, allowUnexported, cmp.Comparer(comparePrivKeys)); diff != "" {
 		t.Errorf("result mismatch (-want +got):\n%s", diff)
 	}
 }
@@ -136,18 +142,7 @@ func newTestRunner(t *testing.T) (tr testRunner, cleanup func()) {
 	ctx := context.Background()
 	authorName := "test_author"
 	authorID := "QmTestAuthorID"
-	// logbooks are encrypted at rest, we need a private key to interact with
-	// them, including to create a new logbook. This is a dummy Private Key
-	// you should never, ever use in real life. demo only folks.
-	testPk := `CAASpgkwggSiAgEAAoIBAQC/7Q7fILQ8hc9g07a4HAiDKE4FahzL2eO8OlB1K99Ad4L1zc2dCg+gDVuGwdbOC29IngMA7O3UXijycckOSChgFyW3PafXoBF8Zg9MRBDIBo0lXRhW4TrVytm4Etzp4pQMyTeRYyWR8e2hGXeHArXM1R/A/SjzZUbjJYHhgvEE4OZy7WpcYcW6K3qqBGOU5GDMPuCcJWac2NgXzw6JeNsZuTimfVCJHupqG/dLPMnBOypR22dO7yJIaQ3d0PFLxiDG84X9YupF914RzJlopfdcuipI+6gFAgBw3vi6gbECEzcohjKf/4nqBOEvCDD6SXfl5F/MxoHurbGBYB2CJp+FAgMBAAECggEAaVOxe6Y5A5XzrxHBDtzjlwcBels3nm/fWScvjH4dMQXlavwcwPgKhy2NczDhr4X69oEw6Msd4hQiqJrlWd8juUg6vIsrl1wS/JAOCS65fuyJfV3Pw64rWbTPMwO3FOvxj+rFghZFQgjg/i45uHA2UUkM+h504M5Nzs6Arr/rgV7uPGR5e5OBw3lfiS9ZaA7QZiOq7sMy1L0qD49YO1ojqWu3b7UaMaBQx1Dty7b5IVOSYG+Y3U/dLjhTj4Hg1VtCHWRm3nMOE9cVpMJRhRzKhkq6gnZmni8obz2BBDF02X34oQLcHC/Wn8F3E8RiBjZDI66g+iZeCCUXvYz0vxWAQQKBgQDEJu6flyHPvyBPAC4EOxZAw0zh6SF/r8VgjbKO3n/8d+kZJeVmYnbsLodIEEyXQnr35o2CLqhCvR2kstsRSfRz79nMIt6aPWuwYkXNHQGE8rnCxxyJmxV4S63GczLk7SIn4KmqPlCI08AU0TXJS3zwh7O6e6kBljjPt1mnMgvr3QKBgQD6fAkdI0FRZSXwzygx4uSg47Co6X6ESZ9FDf6ph63lvSK5/eue/ugX6p/olMYq5CHXbLpgM4EJYdRfrH6pwqtBwUJhlh1xI6C48nonnw+oh8YPlFCDLxNG4tq6JVo071qH6CFXCIank3ThZeW5a3ZSe5pBZ8h4bUZ9H8pJL4C7yQKBgFb8SN/+/qCJSoOeOcnohhLMSSD56MAeK7KIxAF1jF5isr1TP+rqiYBtldKQX9bIRY3/8QslM7r88NNj+aAuIrjzSausXvkZedMrkXbHgS/7EAPflrkzTA8fyH10AsLgoj/68mKr5bz34nuY13hgAJUOKNbvFeC9RI5g6eIqYH0FAoGAVqFTXZp12rrK1nAvDKHWRLa6wJCQyxvTU8S1UNi2EgDJ492oAgNTLgJdb8kUiH0CH0lhZCgr9py5IKW94OSM6l72oF2UrS6PRafHC7D9b2IV5Al9lwFO/3MyBrMocapeeyaTcVBnkclz4Qim3OwHrhtFjF1ifhP9DwVRpuIg+dECgYANwlHxLe//tr6BM31PUUrOxP5Y/cj+ydxqM/z6papZFkK6Mvi/vMQQNQkh95GH9zqyC5Z/yLxur4ry1eNYty/9FnuZRAkEmlUSZ/DobhU0Pmj8Hep6JsTuMutref6vCk2n02jc9qYmJuD7iXkdXDSawbEG6f5C4MUkJ38z1t1OjA==`
-	data, err := base64.StdEncoding.DecodeString(testPk)
-	if err != nil {
-		panic(err)
-	}
-	pk, err := crypto.UnmarshalPrivateKey(data)
-	if err != nil {
-		panic(fmt.Errorf("error unmarshaling private key: %s", err.Error()))
-	}
+	pk := testPrivKey(t)
 
 	book, err := NewBook(pk, authorName, authorID)
 	if err != nil {
@@ -175,3 +170,37 @@ func (tr testRunner) RandomLog(init Op, opCount int) *Log {
 	}
 	return lg
 }
+
+func testPrivKey(t *testing.T) crypto.PrivKey {
+	// logbooks are encrypted at rest, we need a private key to interact with
+	// them, including to create a new logbook. This is a dummy Private Key
+	// you should never, ever use in real life. demo only folks.
+	testPk := `CAASpgkwggSiAgEAAoIBAQC/7Q7fILQ8hc9g07a4HAiDKE4FahzL2eO8OlB1K99Ad4L1zc2dCg+gDVuGwdbOC29IngMA7O3UXijycckOSChgFyW3PafXoBF8Zg9MRBDIBo0lXRhW4TrVytm4Etzp4pQMyTeRYyWR8e2hGXeHArXM1R/A/SjzZUbjJYHhgvEE4OZy7WpcYcW6K3qqBGOU5GDMPuCcJWac2NgXzw6JeNsZuTimfVCJHupqG/dLPMnBOypR22dO7yJIaQ3d0PFLxiDG84X9YupF914RzJlopfdcuipI+6gFAgBw3vi6gbECEzcohjKf/4nqBOEvCDD6SXfl5F/MxoHurbGBYB2CJp+FAgMBAAECggEAaVOxe6Y5A5XzrxHBDtzjlwcBels3nm/fWScvjH4dMQXlavwcwPgKhy2NczDhr4X69oEw6Msd4hQiqJrlWd8juUg6vIsrl1wS/JAOCS65fuyJfV3Pw64rWbTPMwO3FOvxj+rFghZFQgjg/i45uHA2UUkM+h504M5Nzs6Arr/rgV7uPGR5e5OBw3lfiS9ZaA7QZiOq7sMy1L0qD49YO1ojqWu3b7UaMaBQx1Dty7b5IVOSYG+Y3U/dLjhTj4Hg1VtCHWRm3nMOE9cVpMJRhRzKhkq6gnZmni8obz2BBDF02X34oQLcHC/Wn8F3E8RiBjZDI66g+iZeCCUXvYz0vxWAQQKBgQDEJu6flyHPvyBPAC4EOxZAw0zh6SF/r8VgjbKO3n/8d+kZJeVmYnbsLodIEEyXQnr35o2CLqhCvR2kstsRSfRz79nMIt6aPWuwYkXNHQGE8rnCxxyJmxV4S63GczLk7SIn4KmqPlCI08AU0TXJS3zwh7O6e6kBljjPt1mnMgvr3QKBgQD6fAkdI0FRZSXwzygx4uSg47Co6X6ESZ9FDf6ph63lvSK5/eue/ugX6p/olMYq5CHXbLpgM4EJYdRfrH6pwqtBwUJhlh1xI6C48nonnw+oh8YPlFCDLxNG4tq6JVo071qH6CFXCIank3ThZeW5a3ZSe5pBZ8h4bUZ9H8pJL4C7yQKBgFb8SN/+/qCJSoOeOcnohhLMSSD56MAeK7KIxAF1jF5isr1TP+rqiYBtldKQX9bIRY3/8QslM7r88NNj+aAuIrjzSausXvkZedMrkXbHgS/7EAPflrkzTA8fyH10AsLgoj/68mKr5bz34nuY13hgAJUOKNbvFeC9RI5g6eIqYH0FAoGAVqFTXZp12rrK1nAvDKHWRLa6wJCQyxvTU8S1UNi2EgDJ492oAgNTLgJdb8kUiH0CH0lhZCgr9py5IKW94OSM6l72oF2UrS6PRafHC7D9b2IV5Al9lwFO/3MyBrMocapeeyaTcVBnkclz4Qim3OwHrhtFjF1ifhP9DwVRpuIg+dECgYANwlHxLe//tr6BM31PUUrOxP5Y/cj+ydxqM/z6papZFkK6Mvi/vMQQNQkh95GH9zqyC5Z/yLxur4ry1eNYty/9FnuZRAkEmlUSZ/DobhU0Pmj8Hep6JsTuMutref6vCk2n02jc9qYmJuD7iXkdXDSawbEG6f5C4MUkJ38z1t1OjA==`
+	data, err := base64.StdEncoding.DecodeString(testPk)
+	if err != nil {
+		t.Fatal(err)
+	}
+	pk, err := crypto.UnmarshalPrivateKey(data)
+	if err != nil {
+		t.Fatalf("error unmarshaling private key: %s", err.Error())
+	}
+	return pk
+}
+
+func comparePrivKeys(a, b crypto.PrivKey) bool {
+	if a == nil && b != nil || a != nil && b == nil {
+		return false
+	}
+
+	abytes, err := a.Bytes()
+	if err != nil {
+		return false
+	}
+
+	bbytes, err := b.Bytes()
+	if err != nil {
+		return false
+	}
+
+	return string(abytes) == string(bbytes)
+}
diff --git a/logbook/logbook.go b/logbook/logbook.go
index c5072f7aa..5c953c919 100644
--- a/logbook/logbook.go
+++ b/logbook/logbook.go
@@ -33,6 +33,7 @@ const (
 	versionModel     uint32 = 0x0003
 	publicationModel uint32 = 0x0004
 	aclModel         uint32 = 0x0005
+	cronJobModel     uint32 = 0x0006
 )
 
 // Book wraps a log.Book with a higher-order API specific to Qri
@@ -89,7 +90,7 @@ func (book *Book) initialize(ctx context.Context) error {
 	// initialize author namespace
 	l := log.InitLog(log.Op{
 		Type:      log.OpTypeInit,
-		Model:     nameModel,
+		Model:     userModel,
 		Name:      book.bk.AuthorName(),
 		AuthorID:  book.bk.AuthorID(),
 		Timestamp: time.Now().UnixNano(),
@@ -161,7 +162,7 @@ func (book Book) initName(ctx context.Context, name string) *log.Log {
 }
 
 func (book Book) authorNamespace() *log.Log {
-	for _, l := range book.bk.ModelLogs(nameModel) {
+	for _, l := range book.bk.ModelLogs(userModel) {
 		if l.Name() == book.bk.AuthorName() {
 			return l
 		}
@@ -170,9 +171,34 @@ func (book Book) authorNamespace() *log.Log {
 	return nil
 }
 
+// WriteNameAmend marks a rename event within a namespace
+func (book Book) WriteNameAmend(ctx context.Context, ref dsref.Ref, newName string) error {
+	// TODO (b5) - finish
+	l, err := book.readRefLog(ref)
+	if err != nil {
+		return err
+	}
+
+	l.Append(log.Op{
+		Type:      log.OpTypeAmend,
+		Model:     nameModel,
+		Name:      newName,
+		Timestamp: time.Now().UnixNano(),
+	})
+
+	return nil
+}
+
 // WriteVersionSave adds an operation to a log marking the creation of a
 // dataset version. Book will copy details from the provided dataset pointer
-func (book Book) WriteVersionSave(ctx context.Context, ref dsref.Ref, ds *dataset.Dataset) error {
+func (book Book) WriteVersionSave(ctx context.Context, ds *dataset.Dataset) error {
+	ref := dsref.Ref{
+		Username:  ds.Peername,
+		ProfileID: ds.ProfileID,
+		Name:      ds.Name,
+		Path:      ds.Path,
+	}
+
 	l, err := book.readRefLog(ref)
 	if err != nil {
 		if err == ErrNotFound {
@@ -270,6 +296,23 @@ func (book Book) WriteUnpublish(ctx context.Context, ref dsref.Ref, revisions in
 	return book.Save(ctx)
 }
 
+// WriteCronJobRan adds an operation to a log marking the execution of a cronjob
+func (book Book) WriteCronJobRan(ctx context.Context, number int64, ref dsref.Ref) error {
+	l, err := book.readRefLog(ref)
+	if err != nil {
+		return err
+	}
+
+	l.Append(log.Op{
+		Type:  log.OpTypeRemove,
+		Model: cronJobModel,
+		Size:  uint64(number),
+		// TODO (b5) - finish
+	})
+
+	return book.Save(ctx)
+}
+
 // Author represents the author at a point in time
 type Author struct {
 	Username string
diff --git a/logbook/logbook_test.go b/logbook/logbook_test.go
index f50703a2f..d9e7b7458 100644
--- a/logbook/logbook_test.go
+++ b/logbook/logbook_test.go
@@ -57,6 +57,8 @@ func Example() {
 	// pretend we've just created a dataset, these are the only fields the log
 	// will care about
 	ds := &dataset.Dataset{
+		Peername: "b5",
+		Name:     "world_bank_population",
 		Commit: &dataset.Commit{
 			Timestamp: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
 			Title:     "initial commit",
@@ -67,16 +69,16 @@ func Example() {
 		// need to model those properly first.
 	}
 
-	ref := dsref.Ref{Username: "b5", Name: "world_bank_population"}
-
 	// create a log record of the version of a dataset. In practice this'll be
 	// part of the overall save routine that created the above ds variable
-	if err := book.WriteVersionSave(ctx, ref, ds); err != nil {
+	if err := book.WriteVersionSave(ctx, ds); err != nil {
 		panic(err)
 	}
 
 	// sometime later, we create another version
 	ds2 := &dataset.Dataset{
+		Peername: "b5",
+		Name:     "world_bank_population",
 		Commit: &dataset.Commit{
 			Timestamp: time.Date(2000, time.January, 2, 0, 0, 0, 0, time.UTC),
 			Title:     "added body data",
@@ -89,10 +91,15 @@ func Example() {
 	}
 
 	// once again, write to the log
-	if err := book.WriteVersionSave(ctx, ref, ds2); err != nil {
+	if err := book.WriteVersionSave(ctx, ds2); err != nil {
 		panic(err)
 	}
 
+	ref := dsref.Ref{
+		Username: "b5",
+		Name:     "world_bank_population",
+	}
+
 	// pretend we just published both saved versions of the dataset to the
 	// registry we log that here. Providing a revisions arg of 2 means we've
 	// published two consecutive revisions from head: the latest version, and the
@@ -110,6 +117,8 @@ func Example() {
 
 	// create another version
 	ds3 := &dataset.Dataset{
+		Peername: "b5",
+		Name:     "world_bank_population",
 		Commit: &dataset.Commit{
 			Timestamp: time.Date(2000, time.January, 3, 0, 0, 0, 0, time.UTC),
 			Title:     "added meta info",
@@ -125,7 +134,7 @@ func Example() {
 	}
 
 	// once again, write to the log
-	if err := book.WriteVersionSave(ctx, ref, ds3); err != nil {
+	if err := book.WriteVersionSave(ctx, ds3); err != nil {
 		panic(err)
 	}
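
Note for reviewers: with the signature change above, WriteVersionSave derives the dsref.Ref from fields on the dataset itself, so callers are responsible for populating Peername, ProfileID, Name, and Path before logging (base.CreateDataset now assigns them from the profile and the stored path). A minimal caller sketch under those assumptions follows; the package name, helper name, and the validation check are illustrative only, not part of this diff.

package example

import (
	"context"
	"fmt"

	"github.com/qri-io/dataset"
	"github.com/qri-io/qri/logbook"
)

// logVersionSave is a hypothetical wrapper showing the new call shape of
// Book.WriteVersionSave: the explicit ref argument is gone, and identity
// is read off the dataset pointer instead.
func logVersionSave(ctx context.Context, book *logbook.Book, ds *dataset.Dataset) error {
	// these fields feed the dsref.Ref that WriteVersionSave builds internally
	if ds.Peername == "" || ds.Name == "" {
		return fmt.Errorf("dataset peername and name must be set before logging a save")
	}
	return book.WriteVersionSave(ctx, ds)
}

In the hunks above the Book value comes from r.Logbook(), and the updated Example() in logbook_test.go makes the same call directly with datasets that carry Peername and Name.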