refactor(logbook): replace events code with logbook writes
tests caught a bug in log.Book.unmarshal that was destroying the pk ref. yay tests
b5 committed Oct 12, 2019
1 parent 39f36d0 commit c80d204
Showing 7 changed files with 138 additions and 42 deletions.
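The bug the commit message refers to lives in log.Book.unmarshalFlatbuffer: rebuilding the receiver from decoded fields dropped the private key, which is never serialized and only exists in memory. Below is a minimal, self-contained sketch of that pattern (simplified stand-in types, not the actual qri code) showing why the fix in logbook/log/log.go copies book.pk into the rebuilt struct.

package main

import "fmt"

// Book is a simplified stand-in for log.Book: pk is never written to the
// flatbuffer, so it only exists in memory and must be carried forward when
// the struct is rebuilt during unmarshaling.
type Book struct {
	pk         string // stand-in for the crypto.PrivKey field
	authorname string
	id         string
	logs       map[uint32][]string
}

// unmarshal rebuilds the receiver from decoded values. Building the new
// struct without pk destroyed the key reference; copying book.pk preserves it.
func (book *Book) unmarshal(authorname, id string) error {
	newBook := Book{
		pk:         book.pk, // the fix: keep the existing private key
		authorname: authorname,
		id:         id,
		logs:       map[uint32][]string{},
	}
	*book = newBook
	return nil
}

func main() {
	b := &Book{pk: "dummy-private-key"}
	if err := b.unmarshal("test_author", "QmTestAuthorID"); err != nil {
		panic(err)
	}
	fmt.Println(b.pk) // still "dummy-private-key": the key survived unmarshaling
}

The real fix, and the test-side change that re-provides the key before unmarshaling, appear in the logbook/log/log.go and logbook/log/log_test.go diffs below.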
24 changes: 16 additions & 8 deletions actions/dataset.go
@@ -12,6 +12,7 @@ import (
"github.com/qri-io/qfs"
"github.com/qri-io/qfs/cafs"
"github.com/qri-io/qri/base"
"github.com/qri-io/qri/logbook"
"github.com/qri-io/qri/p2p"
"github.com/qri-io/qri/remote"
"github.com/qri-io/qri/repo"
@@ -300,6 +301,10 @@ func ModifyDataset(node *p2p.QriNode, current, new *repo.DatasetRef, isRename bo
}
if isRename {
new.Path = current.Path

if err = r.Logbook().WriteNameAmend(context.TODO(), repo.ConvertToDsref(*current), new.Name); err != nil {
return err
}
}

if err = r.DeleteRef(*current); err != nil {
@@ -309,7 +314,6 @@ func ModifyDataset(node *p2p.QriNode, current, new *repo.DatasetRef, isRename bo
return err
}

// return r.LogEvent(repo.ETDsRenamed, *new)
return nil
}

@@ -334,14 +338,14 @@ func DeleteDataset(ctx context.Context, node *p2p.QriNode, ref *repo.DatasetRef)
// TODO - this is causing bad things in our tests. For some reason core repo explodes with nil
// references when this is on and go test ./... is run from $GOPATH/github.com/qri-io/qri
// let's upgrade IPFS to the latest version & try again
// log, err := base.DatasetLog(r, *ref, 10000, 0, false)
// if err != nil {
// return err
// }
log, err := base.DatasetLog(ctx, r, *ref, 10000, 0, false)
if err != nil {
return err
}

// for _, ref := range log {
// time.Sleep(time.Millisecond * 50)
// if err = base.UnpinDataset(r, ref); err != nil {
// if err = base.UnpinDataset(r, ref); err != nil && err != repo.ErrNotPinner {
// return err
// }
// }
@@ -354,6 +358,10 @@ func DeleteDataset(ctx context.Context, node *p2p.QriNode, ref *repo.DatasetRef)
return err
}

// return r.LogEvent(repo.ETDsDeleted, *ref)
return nil
err = r.Logbook().WriteVersionDelete(ctx, repo.ConvertToDsref(*ref), len(log))
if err == logbook.ErrNotFound {
return nil
}

return err
}
13 changes: 9 additions & 4 deletions base/dataset.go
@@ -208,12 +208,18 @@ func CreateDataset(ctx context.Context, r repo.Repo, streams ioes.IOStreams, ds,
Name: ds.Name,
Path: path,
}

if err = r.PutRef(ref); err != nil {
return
}
// if err = r.LogEvent(repo.ETDsCreated, ref); err != nil {
// return
// }

// TODO (b5): confirm these assignments happen in dsfs.CreateDataset with tests
ds.ProfileID = pro.ID.String()
ds.Peername = pro.Peername
ds.Path = path
if err = r.Logbook().WriteVersionSave(ctx, ds); err != nil {
return
}

if err = ReadDataset(ctx, r, &ref); err != nil {
return
@@ -321,7 +327,6 @@ func PinDataset(ctx context.Context, r repo.Repo, ref repo.DatasetRef) error {
func UnpinDataset(ctx context.Context, r repo.Repo, ref repo.DatasetRef) error {
if pinner, ok := r.Store().(cafs.Pinner); ok {
return pinner.Unpin(ctx, ref.Path, true)
// return r.LogEvent(repo.ETDsUnpinned, ref)
}
return repo.ErrNotPinner
}
10 changes: 5 additions & 5 deletions lib/update.go
@@ -332,8 +332,7 @@ func (m *UpdateMethods) Run(p *Job, res *repo.DatasetRef) (err error) {
err = m.runDatasetUpdate(ctx, params, res)

case cron.JTShellScript:
return update.JobToCmd(m.inst.streams, p).Run()

err = update.JobToCmd(m.inst.streams, p).Run()
case cron.JobType(""):
return fmt.Errorf("update requires a job type to run")
default:
@@ -344,9 +343,10 @@ func (m *UpdateMethods) Run(p *Job, res *repo.DatasetRef) (err error) {
return err
}

// TODO (b5): expand event logging interface to support storing additional details
// return m.inst.Repo().LogEvent(repo.ETCronJobRan, *res)
return nil
if p.RunError == "" {
err = m.inst.Repo().Logbook().WriteCronJobRan(ctx, p.RunNumber, repo.ConvertToDsref(*res))
}
return err
}

func absolutizeJobFilepaths(j *Job) error {
6 changes: 4 additions & 2 deletions logbook/log/log.go
@@ -163,8 +163,10 @@ func (book Book) marshalFlatbuffer(builder *flatbuffers.Builder) flatbuffers.UOf

func (book *Book) unmarshalFlatbuffer(b *logfb.Book) error {
newBook := Book{
id: string(b.Identifier()),
logs: map[uint32][]*Log{},
pk: book.pk,
authorname: string(b.Name()),
id: string(b.Identifier()),
logs: map[uint32][]*Log{},
}

count := b.LogsLength()
59 changes: 44 additions & 15 deletions logbook/log/log_test.go
@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"encoding/base64"
"fmt"
"testing"

"github.com/google/go-cmp/cmp"
@@ -19,6 +18,7 @@ var allowUnexported = cmp.AllowUnexported(
)

func TestBookFlatbuffer(t *testing.T) {
pk := testPrivKey(t)
log := InitLog(Op{
Type: OpTypeInit,
Model: 0x0001,
@@ -45,6 +45,8 @@ func TestBookFlatbuffer(t *testing.T) {
}))

book := &Book{
pk: pk,
authorname: "must_preserve",
logs: map[uint32][]*Log{
0x0001: []*Log{log},
},
@@ -53,12 +55,16 @@ }
data := book.flatbufferBytes()
logsetfb := logfb.GetRootAsBook(data, 0)

got := &Book{}
got := &Book{
// re-provide private key, unmarshal flatbuffer must preserve this key
// through the unmarshaling call
pk: pk,
}
if err := got.unmarshalFlatbuffer(logsetfb); err != nil {
t.Fatalf("unmarshalling flatbuffer bytes: %s", err.Error())
}

if diff := cmp.Diff(book, got, allowUnexported); diff != "" {
if diff := cmp.Diff(book, got, allowUnexported, cmp.Comparer(comparePrivKeys)); diff != "" {
t.Errorf("result mismatch (-want +got):\n%s", diff)
}
}
@@ -136,18 +142,7 @@ func newTestRunner(t *testing.T) (tr testRunner, cleanup func()) {
ctx := context.Background()
authorName := "test_author"
authorID := "QmTestAuthorID"
// logbooks are encrypted at rest, we need a private key to interact with
// them, including to create a new logbook. This is a dummy Private Key
// you should never, ever use in real life. demo only folks.
testPk := `CAASpgkwggSiAgEAAoIBAQC/7Q7fILQ8hc9g07a4HAiDKE4FahzL2eO8OlB1K99Ad4L1zc2dCg+gDVuGwdbOC29IngMA7O3UXijycckOSChgFyW3PafXoBF8Zg9MRBDIBo0lXRhW4TrVytm4Etzp4pQMyTeRYyWR8e2hGXeHArXM1R/A/SjzZUbjJYHhgvEE4OZy7WpcYcW6K3qqBGOU5GDMPuCcJWac2NgXzw6JeNsZuTimfVCJHupqG/dLPMnBOypR22dO7yJIaQ3d0PFLxiDG84X9YupF914RzJlopfdcuipI+6gFAgBw3vi6gbECEzcohjKf/4nqBOEvCDD6SXfl5F/MxoHurbGBYB2CJp+FAgMBAAECggEAaVOxe6Y5A5XzrxHBDtzjlwcBels3nm/fWScvjH4dMQXlavwcwPgKhy2NczDhr4X69oEw6Msd4hQiqJrlWd8juUg6vIsrl1wS/JAOCS65fuyJfV3Pw64rWbTPMwO3FOvxj+rFghZFQgjg/i45uHA2UUkM+h504M5Nzs6Arr/rgV7uPGR5e5OBw3lfiS9ZaA7QZiOq7sMy1L0qD49YO1ojqWu3b7UaMaBQx1Dty7b5IVOSYG+Y3U/dLjhTj4Hg1VtCHWRm3nMOE9cVpMJRhRzKhkq6gnZmni8obz2BBDF02X34oQLcHC/Wn8F3E8RiBjZDI66g+iZeCCUXvYz0vxWAQQKBgQDEJu6flyHPvyBPAC4EOxZAw0zh6SF/r8VgjbKO3n/8d+kZJeVmYnbsLodIEEyXQnr35o2CLqhCvR2kstsRSfRz79nMIt6aPWuwYkXNHQGE8rnCxxyJmxV4S63GczLk7SIn4KmqPlCI08AU0TXJS3zwh7O6e6kBljjPt1mnMgvr3QKBgQD6fAkdI0FRZSXwzygx4uSg47Co6X6ESZ9FDf6ph63lvSK5/eue/ugX6p/olMYq5CHXbLpgM4EJYdRfrH6pwqtBwUJhlh1xI6C48nonnw+oh8YPlFCDLxNG4tq6JVo071qH6CFXCIank3ThZeW5a3ZSe5pBZ8h4bUZ9H8pJL4C7yQKBgFb8SN/+/qCJSoOeOcnohhLMSSD56MAeK7KIxAF1jF5isr1TP+rqiYBtldKQX9bIRY3/8QslM7r88NNj+aAuIrjzSausXvkZedMrkXbHgS/7EAPflrkzTA8fyH10AsLgoj/68mKr5bz34nuY13hgAJUOKNbvFeC9RI5g6eIqYH0FAoGAVqFTXZp12rrK1nAvDKHWRLa6wJCQyxvTU8S1UNi2EgDJ492oAgNTLgJdb8kUiH0CH0lhZCgr9py5IKW94OSM6l72oF2UrS6PRafHC7D9b2IV5Al9lwFO/3MyBrMocapeeyaTcVBnkclz4Qim3OwHrhtFjF1ifhP9DwVRpuIg+dECgYANwlHxLe//tr6BM31PUUrOxP5Y/cj+ydxqM/z6papZFkK6Mvi/vMQQNQkh95GH9zqyC5Z/yLxur4ry1eNYty/9FnuZRAkEmlUSZ/DobhU0Pmj8Hep6JsTuMutref6vCk2n02jc9qYmJuD7iXkdXDSawbEG6f5C4MUkJ38z1t1OjA==`
data, err := base64.StdEncoding.DecodeString(testPk)
if err != nil {
panic(err)
}
pk, err := crypto.UnmarshalPrivateKey(data)
if err != nil {
panic(fmt.Errorf("error unmarshaling private key: %s", err.Error()))
}
pk := testPrivKey(t)

book, err := NewBook(pk, authorName, authorID)
if err != nil {
@@ -175,3 +170,37 @@ func (tr testRunner) RandomLog(init Op, opCount int) *Log {
}
return lg
}

func testPrivKey(t *testing.T) crypto.PrivKey {
// logbooks are encrypted at rest, we need a private key to interact with
// them, including to create a new logbook. This is a dummy Private Key
// you should never, ever use in real life. demo only folks.
testPk := `CAASpgkwggSiAgEAAoIBAQC/7Q7fILQ8hc9g07a4HAiDKE4FahzL2eO8OlB1K99Ad4L1zc2dCg+gDVuGwdbOC29IngMA7O3UXijycckOSChgFyW3PafXoBF8Zg9MRBDIBo0lXRhW4TrVytm4Etzp4pQMyTeRYyWR8e2hGXeHArXM1R/A/SjzZUbjJYHhgvEE4OZy7WpcYcW6K3qqBGOU5GDMPuCcJWac2NgXzw6JeNsZuTimfVCJHupqG/dLPMnBOypR22dO7yJIaQ3d0PFLxiDG84X9YupF914RzJlopfdcuipI+6gFAgBw3vi6gbECEzcohjKf/4nqBOEvCDD6SXfl5F/MxoHurbGBYB2CJp+FAgMBAAECggEAaVOxe6Y5A5XzrxHBDtzjlwcBels3nm/fWScvjH4dMQXlavwcwPgKhy2NczDhr4X69oEw6Msd4hQiqJrlWd8juUg6vIsrl1wS/JAOCS65fuyJfV3Pw64rWbTPMwO3FOvxj+rFghZFQgjg/i45uHA2UUkM+h504M5Nzs6Arr/rgV7uPGR5e5OBw3lfiS9ZaA7QZiOq7sMy1L0qD49YO1ojqWu3b7UaMaBQx1Dty7b5IVOSYG+Y3U/dLjhTj4Hg1VtCHWRm3nMOE9cVpMJRhRzKhkq6gnZmni8obz2BBDF02X34oQLcHC/Wn8F3E8RiBjZDI66g+iZeCCUXvYz0vxWAQQKBgQDEJu6flyHPvyBPAC4EOxZAw0zh6SF/r8VgjbKO3n/8d+kZJeVmYnbsLodIEEyXQnr35o2CLqhCvR2kstsRSfRz79nMIt6aPWuwYkXNHQGE8rnCxxyJmxV4S63GczLk7SIn4KmqPlCI08AU0TXJS3zwh7O6e6kBljjPt1mnMgvr3QKBgQD6fAkdI0FRZSXwzygx4uSg47Co6X6ESZ9FDf6ph63lvSK5/eue/ugX6p/olMYq5CHXbLpgM4EJYdRfrH6pwqtBwUJhlh1xI6C48nonnw+oh8YPlFCDLxNG4tq6JVo071qH6CFXCIank3ThZeW5a3ZSe5pBZ8h4bUZ9H8pJL4C7yQKBgFb8SN/+/qCJSoOeOcnohhLMSSD56MAeK7KIxAF1jF5isr1TP+rqiYBtldKQX9bIRY3/8QslM7r88NNj+aAuIrjzSausXvkZedMrkXbHgS/7EAPflrkzTA8fyH10AsLgoj/68mKr5bz34nuY13hgAJUOKNbvFeC9RI5g6eIqYH0FAoGAVqFTXZp12rrK1nAvDKHWRLa6wJCQyxvTU8S1UNi2EgDJ492oAgNTLgJdb8kUiH0CH0lhZCgr9py5IKW94OSM6l72oF2UrS6PRafHC7D9b2IV5Al9lwFO/3MyBrMocapeeyaTcVBnkclz4Qim3OwHrhtFjF1ifhP9DwVRpuIg+dECgYANwlHxLe//tr6BM31PUUrOxP5Y/cj+ydxqM/z6papZFkK6Mvi/vMQQNQkh95GH9zqyC5Z/yLxur4ry1eNYty/9FnuZRAkEmlUSZ/DobhU0Pmj8Hep6JsTuMutref6vCk2n02jc9qYmJuD7iXkdXDSawbEG6f5C4MUkJ38z1t1OjA==`
data, err := base64.StdEncoding.DecodeString(testPk)
if err != nil {
t.Fatal(err)
}
pk, err := crypto.UnmarshalPrivateKey(data)
if err != nil {
t.Fatalf("error unmarshaling private key: %s", err.Error())
}
return pk
}

func comparePrivKeys(a, b crypto.PrivKey) bool {
if a == nil && b != nil || a != nil && b == nil {
return false
}

abytes, err := a.Bytes()
if err != nil {
return false
}

bbytes, err := b.Bytes()
if err != nil {
return false
}

return string(abytes) == string(bbytes)
}
49 changes: 46 additions & 3 deletions logbook/logbook.go
@@ -33,6 +33,7 @@ const (
versionModel uint32 = 0x0003
publicationModel uint32 = 0x0004
aclModel uint32 = 0x0005
cronJobModel uint32 = 0x0006
)

// Book wraps a log.Book with a higher-order API specific to Qri
@@ -89,7 +90,7 @@ func (book *Book) initialize(ctx context.Context) error {
// initialize author namespace
l := log.InitLog(log.Op{
Type: log.OpTypeInit,
Model: nameModel,
Model: userModel,
Name: book.bk.AuthorName(),
AuthorID: book.bk.AuthorID(),
Timestamp: time.Now().UnixNano(),
@@ -161,7 +162,7 @@ func (book Book) initName(ctx context.Context, name string) *log.Log {
}

func (book Book) authorNamespace() *log.Log {
for _, l := range book.bk.ModelLogs(nameModel) {
for _, l := range book.bk.ModelLogs(userModel) {
if l.Name() == book.bk.AuthorName() {
return l
}
@@ -170,9 +171,34 @@ func (book Book) authorNamespace() *log.Log {
return nil
}

// WriteNameAmend marks a rename event within a namespace
func (book Book) WriteNameAmend(ctx context.Context, ref dsref.Ref, newName string) error {
// TODO (b5) - finish
l, err := book.readRefLog(ref)
if err != nil {
return err
}

l.Append(log.Op{
Type: log.OpTypeAmend,
Model: nameModel,
Name: newName,
Timestamp: time.Now().UnixNano(),
})

return nil
}

// WriteVersionSave adds an operation to a log marking the creation of a
// dataset version. Book will copy details from the provided dataset pointer
func (book Book) WriteVersionSave(ctx context.Context, ref dsref.Ref, ds *dataset.Dataset) error {
func (book Book) WriteVersionSave(ctx context.Context, ds *dataset.Dataset) error {
ref := dsref.Ref{
Username: ds.Peername,
ProfileID: ds.ProfileID,
Name: ds.Name,
Path: ds.Path,
}

l, err := book.readRefLog(ref)
if err != nil {
if err == ErrNotFound {
@@ -270,6 +296,23 @@ func (book Book) WriteUnpublish(ctx context.Context, ref dsref.Ref, revisions in
return book.Save(ctx)
}

// WriteCronJobRan adds an operation to a log marking the execution of a cronjob
func (book Book) WriteCronJobRan(ctx context.Context, number int64, ref dsref.Ref) error {
l, err := book.readRefLog(ref)
if err != nil {
return err
}

l.Append(log.Op{
Type: log.OpTypeRemove,
Model: cronJobModel,
Size: uint64(number),
// TODO (b5) - finish
})

return book.Save(ctx)
}

// Author represents the author at a point in time
type Author struct {
Username string
19 changes: 14 additions & 5 deletions logbook/logbook_test.go
@@ -57,6 +57,8 @@ func Example() {
// pretend we've just created a dataset, these are the only fields the log
// will care about
ds := &dataset.Dataset{
Peername: "b5",
Name: "world_bank_population",
Commit: &dataset.Commit{
Timestamp: time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC),
Title: "initial commit",
@@ -67,16 +69,16 @@ // need to model those properly first.
// need to model those properly first.
}

ref := dsref.Ref{Username: "b5", Name: "world_bank_population"}

// create a log record of the version of a dataset. In practice this'll be
// part of the overall save routine that created the above ds variable
if err := book.WriteVersionSave(ctx, ref, ds); err != nil {
if err := book.WriteVersionSave(ctx, ds); err != nil {
panic(err)
}

// sometime later, we create another version
ds2 := &dataset.Dataset{
Peername: "b5",
Name: "world_bank_population",
Commit: &dataset.Commit{
Timestamp: time.Date(2000, time.January, 2, 0, 0, 0, 0, time.UTC),
Title: "added body data",
@@ -89,10 +91,15 @@ }
}

// once again, write to the log
if err := book.WriteVersionSave(ctx, ref, ds2); err != nil {
if err := book.WriteVersionSave(ctx, ds2); err != nil {
panic(err)
}

ref := dsref.Ref{
Username: "b5",
Name: "world_bank_population",
}

// pretend we just published both saved versions of the dataset to the
// registry we log that here. Providing a revisions arg of 2 means we've
// published two consecutive revisions from head: the latest version, and the
@@ -110,6 +117,8 @@

// create another version
ds3 := &dataset.Dataset{
Peername: "b5",
Name: "world_bank_population",
Commit: &dataset.Commit{
Timestamp: time.Date(2000, time.January, 3, 0, 0, 0, 0, time.UTC),
Title: "added meta info",
@@ -125,7 +134,7 @@ }
}

// once again, write to the log
if err := book.WriteVersionSave(ctx, ref, ds3); err != nil {
if err := book.WriteVersionSave(ctx, ds3); err != nil {
panic(err)
}
