diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 08444fe3ef2..ab05934b90b 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,6 +1,6 @@ { "ImportPath": "github.com/jbenet/go-ipfs", - "GoVersion": "go1.3.1", + "GoVersion": "go1.3", "Packages": [ "./..." ], @@ -19,6 +19,11 @@ "Comment": "null-219", "Rev": "00a7d3b31bbab5795b4a51933c04fc2768242970" }, + { + "ImportPath": "code.google.com/p/go.net/context", + "Comment": "null-144", + "Rev": "ad01a6fcc8a19d3a4478c836895ffe883bd2ceab" + }, { "ImportPath": "code.google.com/p/gogoprotobuf/proto", "Rev": "6c980277330804e94257ac7ef70a3adbe1641059" @@ -55,8 +60,8 @@ }, { "ImportPath": "github.com/jbenet/go-multiaddr", - "Comment": "0.1.2", - "Rev": "b90678896b52c3e5a4c8176805c6facc3fe3eb82" + "Comment": "0.1.2-3-g74443fc", + "Rev": "74443fca319c4c2f5e9968b8e268c30a4a74dc64" }, { "ImportPath": "github.com/jbenet/go-multihash", @@ -66,6 +71,10 @@ { "ImportPath": "github.com/syndtr/goleveldb/leveldb", "Rev": "99056d50e56252fbe0021d5c893defca5a76baf8" + }, + { + "ImportPath": "github.com/tuxychandru/pubsub", + "Rev": "02de8aa2db3d570c5ab1be5ba67b456fd0fb7c4e" } ] } diff --git a/Godeps/_workspace/src/code.google.com/p/go.net/context/context.go b/Godeps/_workspace/src/code.google.com/p/go.net/context/context.go new file mode 100644 index 00000000000..e3c5345d757 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go.net/context/context.go @@ -0,0 +1,431 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. 
The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. 
+ // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // DoSomething calls DoSomethingSlow and returns as soon as + // // it returns or ctx.Done is closed. + // func DoSomething(ctx context.Context) (Result, error) { + // c := make(chan Result, 1) + // go func() { c <- DoSomethingSlow(ctx) }() + // select { + // case res := <-c: + // return res, nil + // case <-ctx.Done(): + // return nil, ctx.Err() + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. 
+ // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "code.google.com/p/go.net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// An emptyCtx is never canceled, has no values, and has no deadline. 
+type emptyCtx int + +func (emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (emptyCtx) Done() <-chan struct{} { + return nil +} + +func (emptyCtx) Err() error { + return nil +} + +func (emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (n emptyCtx) String() string { + switch n { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +const ( + background emptyCtx = 1 + todo emptyCtx = 2 +) + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it's is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, &c) + return &c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. 
+func newCancelCtx(parent Context) cancelCtx { + return cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return &c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. 
+ + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + if p, ok := parentCancelCtx(c.Context); ok { + p.mu.Lock() + if p.children != nil { + delete(p.children, c) + } + p.mu.Unlock() + } + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with the deadline +// timer, so code should call cancel as soon as the operations running in this +// Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. 
+ return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(removeFromParent, err) + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with the deadline +// timer, so code should call cancel as soon as the operations running in this +// Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. 
+// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/Godeps/_workspace/src/code.google.com/p/go.net/context/context_test.go b/Godeps/_workspace/src/code.google.com/p/go.net/context/context_test.go new file mode 100644 index 00000000000..c1a4de5ff77 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go.net/context/context_test.go @@ -0,0 +1,553 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context + +import ( + "fmt" + "math/rand" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +// otherContext is a Context that's not one of the types defined in context.go. +// This lets us test code paths that differ based on the underlying type of the +// Context. 
+type otherContext struct { + Context +} + +func TestBackground(t *testing.T) { + c := Background() + if c == nil { + t.Fatalf("Background returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.Background"; got != want { + t.Errorf("Background().String() = %q want %q", got, want) + } +} + +func TestTODO(t *testing.T) { + c := TODO() + if c == nil { + t.Fatalf("TODO returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.TODO"; got != want { + t.Errorf("TODO().String() = %q want %q", got, want) + } +} + +func TestWithCancel(t *testing.T) { + c1, cancel := WithCancel(Background()) + + if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want { + t.Errorf("c1.String() = %q want %q", got, want) + } + + o := otherContext{c1} + c2, _ := WithCancel(o) + contexts := []Context{c1, o, c2} + + for i, c := range contexts { + if d := c.Done(); d == nil { + t.Errorf("c[%d].Done() == %v want non-nil", i, d) + } + if e := c.Err(); e != nil { + t.Errorf("c[%d].Err() == %v want nil", i, e) + } + + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + } + + cancel() + time.Sleep(100 * time.Millisecond) // let cancelation propagate + + for i, c := range contexts { + select { + case <-c.Done(): + default: + t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i) + } + if e := c.Err(); e != Canceled { + t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled) + } + } +} + +func TestParentFinishesChild(t *testing.T) { + // Context tree: + // parent -> cancelChild + // parent -> valueChild -> timerChild + parent, cancel := WithCancel(Background()) + cancelChild, stop := WithCancel(parent) + defer stop() + valueChild := WithValue(parent, "key", "value") + timerChild, stop := 
WithTimeout(valueChild, 10000*time.Hour) + defer stop() + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-cancelChild.Done(): + t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x) + case x := <-timerChild.Done(): + t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x) + case x := <-valueChild.Done(): + t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x) + default: + } + + // The parent's children should contain the two cancelable children. + pc := parent.(*cancelCtx) + cc := cancelChild.(*cancelCtx) + tc := timerChild.(*timerCtx) + pc.mu.Lock() + if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] { + t.Errorf("bad linkage: pc.children = %v, want %v and %v", + pc.children, cc, tc) + } + pc.mu.Unlock() + + if p, ok := parentCancelCtx(cc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc) + } + if p, ok := parentCancelCtx(tc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc) + } + + cancel() + + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children) + } + pc.mu.Unlock() + + // parent and children should all be finished. + check := func(ctx Context, name string) { + select { + case <-ctx.Done(): + default: + t.Errorf("<-%s.Done() blocked, but shouldn't have", name) + } + if e := ctx.Err(); e != Canceled { + t.Errorf("%s.Err() == %v want %v", name, e, Canceled) + } + } + check(parent, "parent") + check(cancelChild, "cancelChild") + check(valueChild, "valueChild") + check(timerChild, "timerChild") + + // WithCancel should return a canceled context on a canceled parent. 
+ precanceledChild := WithValue(parent, "key", "value") + select { + case <-precanceledChild.Done(): + default: + t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have") + } + if e := precanceledChild.Err(); e != Canceled { + t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled) + } +} + +func TestChildFinishesFirst(t *testing.T) { + cancelable, stop := WithCancel(Background()) + defer stop() + for _, parent := range []Context{Background(), cancelable} { + child, cancel := WithCancel(parent) + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-child.Done(): + t.Errorf("<-child.Done() == %v want nothing (it should block)", x) + default: + } + + cc := child.(*cancelCtx) + pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background() + if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) { + t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok) + } + + if pcok { + pc.mu.Lock() + if len(pc.children) != 1 || !pc.children[cc] { + t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc) + } + pc.mu.Unlock() + } + + cancel() + + if pcok { + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children) + } + pc.mu.Unlock() + } + + // child should be finished. + select { + case <-child.Done(): + default: + t.Errorf("<-child.Done() blocked, but shouldn't have") + } + if e := child.Err(); e != Canceled { + t.Errorf("child.Err() == %v want %v", e, Canceled) + } + + // parent should not be finished. 
+ select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + default: + } + if e := parent.Err(); e != nil { + t.Errorf("parent.Err() == %v want nil", e) + } + } +} + +func testDeadline(c Context, wait time.Duration, t *testing.T) { + select { + case <-time.After(wait): + t.Fatalf("context should have timed out") + case <-c.Done(): + } + if e := c.Err(); e != DeadlineExceeded { + t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded) + } +} + +func TestDeadline(t *testing.T) { + c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, 200*time.Millisecond, t) + + c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) + o := otherContext{c} + testDeadline(o, 200*time.Millisecond, t) + + c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) + o = otherContext{c} + c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond)) + testDeadline(c, 200*time.Millisecond, t) +} + +func TestTimeout(t *testing.T) { + c, _ := WithTimeout(Background(), 100*time.Millisecond) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, 200*time.Millisecond, t) + + c, _ = WithTimeout(Background(), 100*time.Millisecond) + o := otherContext{c} + testDeadline(o, 200*time.Millisecond, t) + + c, _ = WithTimeout(Background(), 100*time.Millisecond) + o = otherContext{c} + c, _ = WithTimeout(o, 300*time.Millisecond) + testDeadline(c, 200*time.Millisecond, t) +} + +func TestCanceledTimeout(t *testing.T) { + c, _ := WithTimeout(Background(), 200*time.Millisecond) + o := otherContext{c} + c, cancel := WithTimeout(o, 400*time.Millisecond) + cancel() + time.Sleep(100 * time.Millisecond) // 
let cancelation propagate + select { + case <-c.Done(): + default: + t.Errorf("<-c.Done() blocked, but shouldn't have") + } + if e := c.Err(); e != Canceled { + t.Errorf("c.Err() == %v want %v", e, Canceled) + } +} + +type key1 int +type key2 int + +var k1 = key1(1) +var k2 = key2(1) // same int as k1, different type +var k3 = key2(3) // same type as k2, different int + +func TestValues(t *testing.T) { + check := func(c Context, nm, v1, v2, v3 string) { + if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 { + t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0) + } + if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 { + t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0) + } + if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 { + t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0) + } + } + + c0 := Background() + check(c0, "c0", "", "", "") + + c1 := WithValue(Background(), k1, "c1k1") + check(c1, "c1", "c1k1", "", "") + + if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want { + t.Errorf("c.String() = %q want %q", got, want) + } + + c2 := WithValue(c1, k2, "c2k2") + check(c2, "c2", "c1k1", "c2k2", "") + + c3 := WithValue(c2, k3, "c3k3") + check(c3, "c2", "c1k1", "c2k2", "c3k3") + + c4 := WithValue(c3, k1, nil) + check(c4, "c4", "", "c2k2", "c3k3") + + o0 := otherContext{Background()} + check(o0, "o0", "", "", "") + + o1 := otherContext{WithValue(Background(), k1, "c1k1")} + check(o1, "o1", "c1k1", "", "") + + o2 := WithValue(o1, k2, "o2k2") + check(o2, "o2", "c1k1", "o2k2", "") + + o3 := otherContext{c4} + check(o3, "o3", "", "c2k2", "c3k3") + + o4 := WithValue(o3, k3, nil) + check(o4, "o4", "", "c2k2", "") +} + +func TestAllocs(t *testing.T) { + bg := Background() + for _, test := range []struct { + desc string + f func() + limit float64 + gccgoLimit float64 + }{ + { + desc: "Background()", 
+ f: func() { Background() }, + limit: 0, + gccgoLimit: 0, + }, + { + desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1), + f: func() { + c := WithValue(bg, k1, nil) + c.Value(k1) + }, + limit: 1, + gccgoLimit: 3, + }, + { + desc: "WithTimeout(bg, 15*time.Millisecond)", + f: func() { + c, _ := WithTimeout(bg, 15*time.Millisecond) + <-c.Done() + }, + limit: 8, + gccgoLimit: 13, + }, + { + desc: "WithCancel(bg)", + f: func() { + c, cancel := WithCancel(bg) + cancel() + <-c.Done() + }, + limit: 5, + gccgoLimit: 8, + }, + { + desc: "WithTimeout(bg, 100*time.Millisecond)", + f: func() { + c, cancel := WithTimeout(bg, 100*time.Millisecond) + cancel() + <-c.Done() + }, + limit: 8, + gccgoLimit: 25, + }, + } { + limit := test.limit + if runtime.Compiler == "gccgo" { + // gccgo does not yet do escape analysis. + // TOOD(iant): Remove this when gccgo does do escape analysis. + limit = test.gccgoLimit + } + if n := testing.AllocsPerRun(100, test.f); n > limit { + t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit)) + } + } +} + +func TestSimultaneousCancels(t *testing.T) { + root, cancel := WithCancel(Background()) + m := map[Context]CancelFunc{root: cancel} + q := []Context{root} + // Create a tree of contexts. + for len(q) != 0 && len(m) < 100 { + parent := q[0] + q = q[1:] + for i := 0; i < 4; i++ { + ctx, cancel := WithCancel(parent) + m[ctx] = cancel + q = append(q, ctx) + } + } + // Start all the cancels in a random order. + var wg sync.WaitGroup + wg.Add(len(m)) + for _, cancel := range m { + go func(cancel CancelFunc) { + cancel() + wg.Done() + }(cancel) + } + // Wait on all the contexts in a random order. + for ctx := range m { + select { + case <-ctx.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n]) + } + } + // Wait for all the cancel functions to return. 
+ done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n]) + } +} + +func TestInterlockedCancels(t *testing.T) { + parent, cancelParent := WithCancel(Background()) + child, cancelChild := WithCancel(parent) + go func() { + parent.Done() + cancelChild() + }() + cancelParent() + select { + case <-child.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n]) + } +} + +func TestLayersCancel(t *testing.T) { + testLayers(t, time.Now().UnixNano(), false) +} + +func TestLayersTimeout(t *testing.T) { + testLayers(t, time.Now().UnixNano(), true) +} + +func testLayers(t *testing.T, seed int64, testTimeout bool) { + rand.Seed(seed) + errorf := func(format string, a ...interface{}) { + t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...) 
+ } + const ( + timeout = 200 * time.Millisecond + minLayers = 30 + ) + type value int + var ( + vals []*value + cancels []CancelFunc + numTimers int + ctx = Background() + ) + for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ { + switch rand.Intn(3) { + case 0: + v := new(value) + ctx = WithValue(ctx, v, v) + vals = append(vals, v) + case 1: + var cancel CancelFunc + ctx, cancel = WithCancel(ctx) + cancels = append(cancels, cancel) + case 2: + var cancel CancelFunc + ctx, cancel = WithTimeout(ctx, timeout) + cancels = append(cancels, cancel) + numTimers++ + } + } + checkValues := func(when string) { + for _, key := range vals { + if val := ctx.Value(key).(*value); key != val { + errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key) + } + } + } + select { + case <-ctx.Done(): + errorf("ctx should not be canceled yet") + default: + } + if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) { + t.Errorf("ctx.String() = %q want prefix %q", s, prefix) + } + t.Log(ctx) + checkValues("before cancel") + if testTimeout { + select { + case <-ctx.Done(): + case <-time.After(timeout + timeout/10): + errorf("ctx should have timed out") + } + checkValues("after timeout") + } else { + cancel := cancels[rand.Intn(len(cancels))] + cancel() + select { + case <-ctx.Done(): + default: + errorf("ctx should be canceled") + } + checkValues("after cancel") + } +} diff --git a/Godeps/_workspace/src/code.google.com/p/go.net/context/withtimeout_test.go b/Godeps/_workspace/src/code.google.com/p/go.net/context/withtimeout_test.go new file mode 100644 index 00000000000..64854d81b5f --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go.net/context/withtimeout_test.go @@ -0,0 +1,26 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package context_test + +import ( + "fmt" + "time" + + "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" +) + +func ExampleWithTimeout() { + // Pass a context with a timeout to tell a blocking function that it + // should abandon its work after the timeout elapses. + ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) + select { + case <-time.After(200 * time.Millisecond): + fmt.Println("overslept") + case <-ctx.Done(): + fmt.Println(ctx.Err()) // prints "context deadline exceeded" + } + // Output: + // context deadline exceeded +} diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/codec.go b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/codec.go index 527f0cb5873..c7974265230 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/codec.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/codec.go @@ -66,12 +66,14 @@ func bytesToString(b []byte) (ret string, err error) { func addressStringToBytes(p *Protocol, s string) []byte { switch p.Code { - // ipv4,6 - case 4, 41: + case P_IP4: // ipv4 return net.ParseIP(s).To4() + case P_IP6: // ipv6 + return net.ParseIP(s).To16() + // tcp udp dccp sctp - case 6, 17, 33, 132: + case P_TCP, P_UDP, P_DCCP, P_SCTP: b := make([]byte, 2) i, err := strconv.Atoi(s) if err == nil { @@ -87,11 +89,11 @@ func addressBytesToString(p *Protocol, b []byte) string { switch p.Code { // ipv4,6 - case 4, 41: + case P_IP4, P_IP6: return net.IP(b).String() // tcp udp dccp sctp - case 6, 17, 33, 132: + case P_TCP, P_UDP, P_DCCP, P_SCTP: i := binary.BigEndian.Uint16(b) return strconv.Itoa(int(i)) } diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/index.go b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/index.go index df22012e926..a55fa669ec7 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/index.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/index.go @@ -1,6 +1,7 @@ package multiaddr import ( + 
"bytes" "fmt" "strings" ) @@ -19,6 +20,11 @@ func NewMultiaddr(s string) (*Multiaddr, error) { return &Multiaddr{Bytes: b}, nil } +// Equal tests whether two multiaddrs are equal +func (m *Multiaddr) Equal(m2 *Multiaddr) bool { + return bytes.Equal(m.Bytes, m2.Bytes) +} + // String returns the string representation of a Multiaddr func (m *Multiaddr) String() (string, error) { return bytesToString(m.Bytes) diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/multiaddr_test.go b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/multiaddr_test.go index 65cb972197d..7bc2e92bfce 100644 --- a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/multiaddr_test.go +++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/multiaddr_test.go @@ -6,6 +6,40 @@ import ( "testing" ) +func newMultiaddr(t *testing.T, a string) *Multiaddr { + m, err := NewMultiaddr(a) + if err != nil { + t.Error(err) + } + return m +} + +func TestEqual(t *testing.T) { + m1 := newMultiaddr(t, "/ip4/127.0.0.1/udp/1234") + m2 := newMultiaddr(t, "/ip4/127.0.0.1/tcp/1234") + m3 := newMultiaddr(t, "/ip4/127.0.0.1/tcp/1234") + + if m1.Equal(m2) { + t.Error("should not be equal") + } + + if m2.Equal(m1) { + t.Error("should not be equal") + } + + if !m2.Equal(m3) { + t.Error("should be equal") + } + + if !m3.Equal(m2) { + t.Error("should be equal") + } + + if !m1.Equal(m1) { + t.Error("should be equal") + } +} + func TestStringToBytes(t *testing.T) { testString := func(s string, h string) { diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/net.go b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/net.go new file mode 100644 index 00000000000..516fe8392f2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/net.go @@ -0,0 +1,77 @@ +package multiaddr + +import ( + "fmt" + "net" +) + +var errIncorrectNetAddr = fmt.Errorf("incorrect network addr conversion") + +// FromNetAddr converts a net.Addr type to a Multiaddr. 
+func FromNetAddr(a net.Addr) (*Multiaddr, error) {
+	switch a.Network() {
+	case "tcp", "tcp4", "tcp6":
+		ac, ok := a.(*net.TCPAddr)
+		if !ok {
+			return nil, errIncorrectNetAddr
+		}
+
+		// Get IP Addr
+		ipm, err := FromIP(ac.IP)
+		if err != nil {
+			return nil, errIncorrectNetAddr
+		}
+
+		// Get TCP Addr
+		tcpm, err := NewMultiaddr(fmt.Sprintf("/tcp/%d", ac.Port))
+		if err != nil {
+			return nil, errIncorrectNetAddr
+		}
+
+		// Encapsulate
+		return ipm.Encapsulate(tcpm), nil
+
+	case "udp", "udp4", "udp6":
+		ac, ok := a.(*net.UDPAddr)
+		if !ok {
+			return nil, errIncorrectNetAddr
+		}
+
+		// Get IP Addr
+		ipm, err := FromIP(ac.IP)
+		if err != nil {
+			return nil, errIncorrectNetAddr
+		}
+
+		// Get UDP Addr
+		udpm, err := NewMultiaddr(fmt.Sprintf("/udp/%d", ac.Port))
+		if err != nil {
+			return nil, errIncorrectNetAddr
+		}
+
+		// Encapsulate
+		return ipm.Encapsulate(udpm), nil
+
+	case "ip", "ip4", "ip6":
+		ac, ok := a.(*net.IPAddr)
+		if !ok {
+			return nil, errIncorrectNetAddr
+		}
+		return FromIP(ac.IP)
+
+	default:
+		return nil, fmt.Errorf("unknown network %v", a.Network())
+	}
+}
+
+// FromIP converts a net.IP type to a Multiaddr.
+func FromIP(ip net.IP) (*Multiaddr, error) {
+	switch {
+	case ip.To4() != nil:
+		return NewMultiaddr("/ip4/" + ip.String())
+	case ip.To16() != nil:
+		return NewMultiaddr("/ip6/" + ip.String())
+	default:
+		return nil, errIncorrectNetAddr
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/net_test.go b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/net_test.go
new file mode 100644
index 00000000000..fd1ede1f1c3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/jbenet/go-multiaddr/net_test.go
@@ -0,0 +1,49 @@
+package multiaddr
+
+import (
+	"net"
+	"testing"
+)
+
+type GenFunc func() (*Multiaddr, error)
+
+func testConvert(t *testing.T, s string, gen GenFunc) {
+	m, err := gen()
+	if err != nil {
+		t.Fatal("failed to generate.")
+	}
+
+	if s2, err := m.String(); err != nil || s2 != s {
+		t.Fatal("failed to convert: " + s + " != " + s2)
+	}
+}
+
+func TestFromIP4(t *testing.T) {
+	testConvert(t, "/ip4/10.20.30.40", func() (*Multiaddr, error) {
+		return FromIP(net.ParseIP("10.20.30.40"))
+	})
+}
+
+func TestFromIP6(t *testing.T) {
+	testConvert(t, "/ip6/2001:4860:0:2001::68", func() (*Multiaddr, error) {
+		return FromIP(net.ParseIP("2001:4860:0:2001::68"))
+	})
+}
+
+func TestFromTCP(t *testing.T) {
+	testConvert(t, "/ip4/10.20.30.40/tcp/1234", func() (*Multiaddr, error) {
+		return FromNetAddr(&net.TCPAddr{
+			IP:   net.ParseIP("10.20.30.40"),
+			Port: 1234,
+		})
+	})
+}
+
+func TestFromUDP(t *testing.T) {
+	testConvert(t, "/ip4/10.20.30.40/udp/1234", func() (*Multiaddr, error) {
+		return FromNetAddr(&net.UDPAddr{
+			IP:   net.ParseIP("10.20.30.40"),
+			Port: 1234,
+		})
+	})
+}
diff --git a/Godeps/_workspace/src/github.com/tuxychandru/pubsub/README.md b/Godeps/_workspace/src/github.com/tuxychandru/pubsub/README.md
new file mode 100644
index 00000000000..c1aab80b5d8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/tuxychandru/pubsub/README.md
@@ -0,0 +1,30 @@
+Install pubsub with,
+
+    go get github.com/tuxychandru/pubsub
+
+View the [API
Documentation](http://godoc.org/github.com/tuxychandru/pubsub). + +## License + +Copyright (c) 2013, Chandra Sekar S +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/tuxychandru/pubsub/pubsub.go b/Godeps/_workspace/src/github.com/tuxychandru/pubsub/pubsub.go new file mode 100644 index 00000000000..9cbf9cffad2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/tuxychandru/pubsub/pubsub.go @@ -0,0 +1,208 @@ +// Copyright 2013, Chandra Sekar S. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the README.md file. + +// Package pubsub implements a simple multi-topic pub-sub +// library. 
+// +// Topics must be strings and messages of any type can be +// published. A topic can have any number of subcribers and +// all of them receive messages published on the topic. +package pubsub + +type operation int + +const ( + sub operation = iota + subOnce + pub + unsub + unsubAll + closeTopic + shutdown +) + +// PubSub is a collection of topics. +type PubSub struct { + cmdChan chan cmd + capacity int +} + +type cmd struct { + op operation + topics []string + ch chan interface{} + msg interface{} +} + +// New creates a new PubSub and starts a goroutine for handling operations. +// The capacity of the channels created by Sub and SubOnce will be as specified. +func New(capacity int) *PubSub { + ps := &PubSub{make(chan cmd), capacity} + go ps.start() + return ps +} + +// Sub returns a channel on which messages published on any of +// the specified topics can be received. +func (ps *PubSub) Sub(topics ...string) chan interface{} { + return ps.sub(sub, topics...) +} + +// SubOnce is similar to Sub, but only the first message published, after subscription, +// on any of the specified topics can be received. +func (ps *PubSub) SubOnce(topics ...string) chan interface{} { + return ps.sub(subOnce, topics...) +} + +func (ps *PubSub) sub(op operation, topics ...string) chan interface{} { + ch := make(chan interface{}, ps.capacity) + ps.cmdChan <- cmd{op: op, topics: topics, ch: ch} + return ch +} + +// AddSub adds subscriptions to an existing channel. +func (ps *PubSub) AddSub(ch chan interface{}, topics ...string) { + ps.cmdChan <- cmd{op: sub, topics: topics, ch: ch} +} + +// Pub publishes the given message to all subscribers of +// the specified topics. +func (ps *PubSub) Pub(msg interface{}, topics ...string) { + ps.cmdChan <- cmd{op: pub, topics: topics, msg: msg} +} + +// Unsub unsubscribes the given channel from the specified +// topics. If no topic is specified, it is unsubscribed +// from all topics. 
+func (ps *PubSub) Unsub(ch chan interface{}, topics ...string) { + if len(topics) == 0 { + ps.cmdChan <- cmd{op: unsubAll, ch: ch} + return + } + + ps.cmdChan <- cmd{op: unsub, topics: topics, ch: ch} +} + +// Close closes all channels currently subscribed to the specified topics. +// If a channel is subscribed to multiple topics, some of which is +// not specified, it is not closed. +func (ps *PubSub) Close(topics ...string) { + ps.cmdChan <- cmd{op: closeTopic, topics: topics} +} + +// Shutdown closes all subscribed channels and terminates the goroutine. +func (ps *PubSub) Shutdown() { + ps.cmdChan <- cmd{op: shutdown} +} + +func (ps *PubSub) start() { + reg := registry{ + topics: make(map[string]map[chan interface{}]bool), + revTopics: make(map[chan interface{}]map[string]bool), + } + +loop: + for cmd := range ps.cmdChan { + if cmd.topics == nil { + switch cmd.op { + case unsubAll: + reg.removeChannel(cmd.ch) + + case shutdown: + break loop + } + + continue loop + } + + for _, topic := range cmd.topics { + switch cmd.op { + case sub: + reg.add(topic, cmd.ch, false) + + case subOnce: + reg.add(topic, cmd.ch, true) + + case pub: + reg.send(topic, cmd.msg) + + case unsub: + reg.remove(topic, cmd.ch) + + case closeTopic: + reg.removeTopic(topic) + } + } + } + + for topic, chans := range reg.topics { + for ch, _ := range chans { + reg.remove(topic, ch) + } + } +} + +// registry maintains the current subscription state. It's not +// safe to access a registry from multiple goroutines simultaneously. 
+type registry struct { + topics map[string]map[chan interface{}]bool + revTopics map[chan interface{}]map[string]bool +} + +func (reg *registry) add(topic string, ch chan interface{}, once bool) { + if reg.topics[topic] == nil { + reg.topics[topic] = make(map[chan interface{}]bool) + } + reg.topics[topic][ch] = once + + if reg.revTopics[ch] == nil { + reg.revTopics[ch] = make(map[string]bool) + } + reg.revTopics[ch][topic] = true +} + +func (reg *registry) send(topic string, msg interface{}) { + for ch, once := range reg.topics[topic] { + ch <- msg + if once { + for topic := range reg.revTopics[ch] { + reg.remove(topic, ch) + } + } + } +} + +func (reg *registry) removeTopic(topic string) { + for ch := range reg.topics[topic] { + reg.remove(topic, ch) + } +} + +func (reg *registry) removeChannel(ch chan interface{}) { + for topic := range reg.revTopics[ch] { + reg.remove(topic, ch) + } +} + +func (reg *registry) remove(topic string, ch chan interface{}) { + if _, ok := reg.topics[topic]; !ok { + return + } + + if _, ok := reg.topics[topic][ch]; !ok { + return + } + + delete(reg.topics[topic], ch) + delete(reg.revTopics[ch], topic) + + if len(reg.topics[topic]) == 0 { + delete(reg.topics, topic) + } + + if len(reg.revTopics[ch]) == 0 { + close(ch) + delete(reg.revTopics, ch) + } +} diff --git a/Godeps/_workspace/src/github.com/tuxychandru/pubsub/pubsub_test.go b/Godeps/_workspace/src/github.com/tuxychandru/pubsub/pubsub_test.go new file mode 100644 index 00000000000..16392d33bc7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/tuxychandru/pubsub/pubsub_test.go @@ -0,0 +1,230 @@ +// Copyright 2013, Chandra Sekar S. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the README.md file. 
+ +package pubsub + +import ( + check "launchpad.net/gocheck" + "runtime" + "testing" + "time" +) + +var _ = check.Suite(new(Suite)) + +func Test(t *testing.T) { + check.TestingT(t) +} + +type Suite struct{} + +func (s *Suite) TestSub(c *check.C) { + ps := New(1) + ch1 := ps.Sub("t1") + ch2 := ps.Sub("t1") + ch3 := ps.Sub("t2") + + ps.Pub("hi", "t1") + c.Check(<-ch1, check.Equals, "hi") + c.Check(<-ch2, check.Equals, "hi") + + ps.Pub("hello", "t2") + c.Check(<-ch3, check.Equals, "hello") + + ps.Shutdown() + _, ok := <-ch1 + c.Check(ok, check.Equals, false) + _, ok = <-ch2 + c.Check(ok, check.Equals, false) + _, ok = <-ch3 + c.Check(ok, check.Equals, false) +} + +func (s *Suite) TestSubOnce(c *check.C) { + ps := New(1) + ch := ps.SubOnce("t1") + + ps.Pub("hi", "t1") + c.Check(<-ch, check.Equals, "hi") + + _, ok := <-ch + c.Check(ok, check.Equals, false) + ps.Shutdown() +} + +func (s *Suite) TestAddSub(c *check.C) { + ps := New(1) + ch1 := ps.Sub("t1") + ch2 := ps.Sub("t2") + + ps.Pub("hi1", "t1") + c.Check(<-ch1, check.Equals, "hi1") + + ps.Pub("hi2", "t2") + c.Check(<-ch2, check.Equals, "hi2") + + ps.AddSub(ch1, "t2", "t3") + ps.Pub("hi3", "t2") + c.Check(<-ch1, check.Equals, "hi3") + c.Check(<-ch2, check.Equals, "hi3") + + ps.Pub("hi4", "t3") + c.Check(<-ch1, check.Equals, "hi4") + + ps.Shutdown() +} + +func (s *Suite) TestUnsub(c *check.C) { + ps := New(1) + ch := ps.Sub("t1") + + ps.Pub("hi", "t1") + c.Check(<-ch, check.Equals, "hi") + + ps.Unsub(ch, "t1") + _, ok := <-ch + c.Check(ok, check.Equals, false) + ps.Shutdown() +} + +func (s *Suite) TestUnsubAll(c *check.C) { + ps := New(1) + ch1 := ps.Sub("t1", "t2", "t3") + ch2 := ps.Sub("t1", "t3") + + ps.Unsub(ch1) + + m, ok := <-ch1 + c.Check(ok, check.Equals, false) + + ps.Pub("hi", "t1") + m, ok = <-ch2 + c.Check(m, check.Equals, "hi") + + ps.Shutdown() +} + +func (s *Suite) TestClose(c *check.C) { + ps := New(1) + ch1 := ps.Sub("t1") + ch2 := ps.Sub("t1") + ch3 := ps.Sub("t2") + ch4 := ps.Sub("t3") + + 
ps.Pub("hi", "t1") + ps.Pub("hello", "t2") + c.Check(<-ch1, check.Equals, "hi") + c.Check(<-ch2, check.Equals, "hi") + c.Check(<-ch3, check.Equals, "hello") + + ps.Close("t1", "t2") + _, ok := <-ch1 + c.Check(ok, check.Equals, false) + _, ok = <-ch2 + c.Check(ok, check.Equals, false) + _, ok = <-ch3 + c.Check(ok, check.Equals, false) + + ps.Pub("welcome", "t3") + c.Check(<-ch4, check.Equals, "welcome") + + ps.Shutdown() +} + +func (s *Suite) TestUnsubAfterClose(c *check.C) { + ps := New(1) + ch := ps.Sub("t1") + defer func() { + ps.Unsub(ch, "t1") + ps.Shutdown() + }() + + ps.Close("t1") + _, ok := <-ch + c.Check(ok, check.Equals, false) +} + +func (s *Suite) TestShutdown(c *check.C) { + start := runtime.NumGoroutine() + New(10).Shutdown() + time.Sleep(1) + c.Check(runtime.NumGoroutine()-start, check.Equals, 1) +} + +func (s *Suite) TestMultiSub(c *check.C) { + ps := New(1) + ch := ps.Sub("t1", "t2") + + ps.Pub("hi", "t1") + c.Check(<-ch, check.Equals, "hi") + + ps.Pub("hello", "t2") + c.Check(<-ch, check.Equals, "hello") + + ps.Shutdown() + _, ok := <-ch + c.Check(ok, check.Equals, false) +} + +func (s *Suite) TestMultiSubOnce(c *check.C) { + ps := New(1) + ch := ps.SubOnce("t1", "t2") + + ps.Pub("hi", "t1") + c.Check(<-ch, check.Equals, "hi") + + ps.Pub("hello", "t2") + + _, ok := <-ch + c.Check(ok, check.Equals, false) + ps.Shutdown() +} + +func (s *Suite) TestMultiPub(c *check.C) { + ps := New(1) + ch1 := ps.Sub("t1") + ch2 := ps.Sub("t2") + + ps.Pub("hi", "t1", "t2") + c.Check(<-ch1, check.Equals, "hi") + c.Check(<-ch2, check.Equals, "hi") + + ps.Shutdown() +} + +func (s *Suite) TestMultiUnsub(c *check.C) { + ps := New(1) + ch := ps.Sub("t1", "t2", "t3") + + ps.Unsub(ch, "t1") + + ps.Pub("hi", "t1") + + ps.Pub("hello", "t2") + c.Check(<-ch, check.Equals, "hello") + + ps.Unsub(ch, "t2", "t3") + _, ok := <-ch + c.Check(ok, check.Equals, false) + + ps.Shutdown() +} + +func (s *Suite) TestMultiClose(c *check.C) { + ps := New(1) + ch := ps.Sub("t1", "t2") + + 
ps.Pub("hi", "t1") + c.Check(<-ch, check.Equals, "hi") + + ps.Close("t1") + ps.Pub("hello", "t2") + c.Check(<-ch, check.Equals, "hello") + + ps.Close("t2") + _, ok := <-ch + c.Check(ok, check.Equals, false) + + ps.Shutdown() +} diff --git a/bitswap/bitswap.go b/bitswap/bitswap.go index bcb16b747f7..49449f82cf3 100644 --- a/bitswap/bitswap.go +++ b/bitswap/bitswap.go @@ -1,19 +1,23 @@ package bitswap import ( + "errors" "time" - proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + bsmsg "github.com/jbenet/go-ipfs/bitswap/message" + bsnet "github.com/jbenet/go-ipfs/bitswap/network" + notifications "github.com/jbenet/go-ipfs/bitswap/notifications" blocks "github.com/jbenet/go-ipfs/blocks" + blockstore "github.com/jbenet/go-ipfs/blockstore" peer "github.com/jbenet/go-ipfs/peer" - routing "github.com/jbenet/go-ipfs/routing" - dht "github.com/jbenet/go-ipfs/routing/dht" - swarm "github.com/jbenet/go-ipfs/swarm" u "github.com/jbenet/go-ipfs/util" ) +// TODO(brian): ensure messages are being received + // PartnerWantListMax is the bound for the number of keys we'll store per // partner. These are usually taken from the top of the Partner's WantList // advertisements. WantLists are sorted in terms of priority. @@ -23,28 +27,27 @@ const PartnerWantListMax = 10 // access/lookups. type KeySet map[u.Key]struct{} -// BitSwap instances implement the bitswap protocol. -type BitSwap struct { +// bitswap instances implement the bitswap protocol. +type bitswap struct { // peer is the identity of this (local) node. peer *peer.Peer - // net holds the connections to all peers. 
- net swarm.Network - meschan *swarm.Chan + // sender delivers messages on behalf of the session + sender bsnet.NetworkAdapter - // datastore is the local database // Ledgers of known - datastore ds.Datastore + // blockstore is the local database + blockstore blockstore.Blockstore // routing interface for communication - routing *dht.IpfsDHT + routing Directory - listener *swarm.MessageListener + notifications notifications.PubSub // partners is a map of currently active bitswap relationships. // The Ledger has the peer.ID, and the peer connection works through net. // Ledgers of known relationships (active or inactive) stored in datastore. // Changes to the Ledger should be committed to the datastore. - partners LedgerMap + partners ledgerMap // haveList is the set of keys we have values for. a map for fast lookups. // haveList KeySet -- not needed. all values in datastore? @@ -52,38 +55,40 @@ type BitSwap struct { // wantList is the set of keys we want values for. a map for fast lookups. wantList KeySet - strategy StrategyFunc + strategy strategyFunc haltChan chan struct{} } -// NewBitSwap creates a new BitSwap instance. It does not check its parameters. -func NewBitSwap(p *peer.Peer, net swarm.Network, d ds.Datastore, r routing.IpfsRouting) *BitSwap { - bs := &BitSwap{ - peer: p, - net: net, - datastore: d, - partners: LedgerMap{}, - wantList: KeySet{}, - routing: r.(*dht.IpfsDHT), - meschan: net.GetChannel(swarm.PBWrapper_BITSWAP), - haltChan: make(chan struct{}), - listener: swarm.NewMessageListener(), +// NewSession initializes a bitswap session. 
+func NewSession(parent context.Context, s bsnet.NetworkService, p *peer.Peer, d ds.Datastore, directory Directory) Exchange { + + receiver := bsnet.Forwarder{} + bs := &bitswap{ + peer: p, + blockstore: blockstore.NewBlockstore(d), + partners: ledgerMap{}, + wantList: KeySet{}, + routing: directory, + sender: bsnet.NewNetworkAdapter(s, &receiver), + haltChan: make(chan struct{}), + notifications: notifications.New(), + strategy: yesManStrategy, } + receiver.Delegate(bs) - go bs.handleMessages() return bs } // GetBlock attempts to retrieve a particular block from peers, within timeout. -func (bs *BitSwap) GetBlock(k u.Key, timeout time.Duration) ( +func (bs *bitswap) Block(k u.Key, timeout time.Duration) ( *blocks.Block, error) { u.DOut("Bitswap GetBlock: '%s'\n", k.Pretty()) begin := time.Now() tleft := timeout - time.Now().Sub(begin) provs_ch := bs.routing.FindProvidersAsync(k, 20, timeout) - valchan := make(chan []byte) + blockChannel := make(chan blocks.Block) after := time.After(tleft) // TODO: when the data is received, shut down this for loop ASAP @@ -96,7 +101,7 @@ func (bs *BitSwap) GetBlock(k u.Key, timeout time.Duration) ( return } select { - case valchan <- blk: + case blockChannel <- *blk: default: } }(p) @@ -104,36 +109,35 @@ func (bs *BitSwap) GetBlock(k u.Key, timeout time.Duration) ( }() select { - case blkdata := <-valchan: - close(valchan) - return blocks.NewBlock(blkdata) + case block := <-blockChannel: + close(blockChannel) + return &block, nil case <-after: return nil, u.ErrTimeout } } -func (bs *BitSwap) getBlock(k u.Key, p *peer.Peer, timeout time.Duration) ([]byte, error) { +func (bs *bitswap) getBlock(k u.Key, p *peer.Peer, timeout time.Duration) (*blocks.Block, error) { u.DOut("[%s] getBlock '%s' from [%s]\n", bs.peer.ID.Pretty(), k.Pretty(), p.ID.Pretty()) - message := newMessage() - message.AppendWanted(k) + ctx, _ := context.WithTimeout(context.Background(), timeout) + blockChannel := bs.notifications.Subscribe(ctx, k) - after := 
time.After(timeout) - resp := bs.listener.Listen(string(k), 1, timeout) - bs.meschan.Outgoing <- message.ToSwarm(p) + message := bsmsg.New() + message.AppendWanted(k) + bs.sender.SendMessage(ctx, p, message) - select { - case resp_mes := <-resp: - return resp_mes.Data, nil - case <-after: + block, ok := <-blockChannel + if !ok { u.PErr("getBlock for '%s' timed out.\n", k.Pretty()) return nil, u.ErrTimeout } + return &block, nil } -// HaveBlock announces the existance of a block to BitSwap, potentially sending +// HasBlock announces the existance of a block to bitswap, potentially sending // it to peers (Partners) whose WantLists include it. -func (bs *BitSwap) HaveBlock(blk *blocks.Block) error { +func (bs *bitswap) HasBlock(blk blocks.Block) error { go func() { for _, ledger := range bs.partners { if ledger.WantListContains(blk.Key()) { @@ -147,132 +151,99 @@ func (bs *BitSwap) HaveBlock(blk *blocks.Block) error { return bs.routing.Provide(blk.Key()) } -func (bs *BitSwap) SendBlock(p *peer.Peer, b *blocks.Block) { - message := newMessage() +// TODO(brian): get a return value +func (bs *bitswap) SendBlock(p *peer.Peer, b blocks.Block) { + u.DOut("Sending block to peer.\n") + message := bsmsg.New() + // TODO(brian): change interface to accept value instead of pointer message.AppendBlock(b) - bs.meschan.Outgoing <- message.ToSwarm(p) -} - -func (bs *BitSwap) handleMessages() { - for { - select { - case mes := <-bs.meschan.Incoming: - pmes := new(PBMessage) - err := proto.Unmarshal(mes.Data, pmes) - if err != nil { - u.PErr("%v\n", err) - continue - } - if pmes.Blocks != nil { - for _, blkData := range pmes.Blocks { - blk, err := blocks.NewBlock(blkData) - if err != nil { - u.PErr("%v\n", err) - continue - } - go bs.blockReceive(mes.Peer, blk) - } - } - - if pmes.Wantlist != nil { - for _, want := range pmes.Wantlist { - go bs.peerWantsBlock(mes.Peer, want) - } - } - case <-bs.haltChan: - return - } - } + bs.sender.SendMessage(context.Background(), p, message) } // 
peerWantsBlock will check if we have the block in question, // and then if we do, check the ledger for whether or not we should send it. -func (bs *BitSwap) peerWantsBlock(p *peer.Peer, want string) { - u.DOut("peer [%s] wants block [%s]\n", p.ID.Pretty(), u.Key(want).Pretty()) +func (bs *bitswap) peerWantsBlock(p *peer.Peer, wanted u.Key) { + u.DOut("peer [%s] wants block [%s]\n", p.ID.Pretty(), wanted.Pretty()) + ledger := bs.getLedger(p) - dsk := ds.NewKey(want) - blk_i, err := bs.datastore.Get(dsk) - if err != nil { - if err == ds.ErrNotFound { - ledger.Wants(u.Key(want)) - } - u.PErr("datastore get error: %v\n", err) + if !ledger.ShouldSend() { return } - blk, ok := blk_i.([]byte) - if !ok { - u.PErr("data conversion error.\n") + block, err := bs.blockstore.Get(wanted) + if err != nil { // TODO(brian): log/return the error + ledger.Wants(wanted) return } - - if ledger.ShouldSend() { - u.DOut("Sending block to peer.\n") - bblk, err := blocks.NewBlock(blk) - if err != nil { - u.PErr("newBlock error: %v\n", err) - return - } - bs.SendBlock(p, bblk) - ledger.SentBytes(len(blk)) - } else { - u.DOut("Decided not to send block.") - } + bs.SendBlock(p, *block) + ledger.SentBytes(numBytes(*block)) } -func (bs *BitSwap) blockReceive(p *peer.Peer, blk *blocks.Block) { +// TODO(brian): return error +func (bs *bitswap) blockReceive(p *peer.Peer, blk blocks.Block) { u.DOut("blockReceive: %s\n", blk.Key().Pretty()) - err := bs.datastore.Put(ds.NewKey(string(blk.Key())), blk.Data) + err := bs.blockstore.Put(blk) if err != nil { u.PErr("blockReceive error: %v\n", err) return } - mes := &swarm.Message{ - Peer: p, - Data: blk.Data, - } - bs.listener.Respond(string(blk.Key()), mes) + bs.notifications.Publish(blk) ledger := bs.getLedger(p) ledger.ReceivedBytes(len(blk.Data)) } -func (bs *BitSwap) getLedger(p *peer.Peer) *Ledger { +func (bs *bitswap) getLedger(p *peer.Peer) *ledger { l, ok := bs.partners[p.Key()] if ok { return l } - l = new(Ledger) + l = new(ledger) l.Strategy = 
bs.strategy l.Partner = p bs.partners[p.Key()] = l return l } -func (bs *BitSwap) SendWantList(wl KeySet) error { - message := newMessage() +func (bs *bitswap) SendWantList(wl KeySet) error { + message := bsmsg.New() for k, _ := range wl { message.AppendWanted(k) } // Lets just ping everybody all at once for _, ledger := range bs.partners { - bs.meschan.Outgoing <- message.ToSwarm(ledger.Partner) + bs.sender.SendMessage(context.TODO(), ledger.Partner, message) } return nil } -func (bs *BitSwap) Halt() { +func (bs *bitswap) Halt() { bs.haltChan <- struct{}{} } -func (bs *BitSwap) SetStrategy(sf StrategyFunc) { - bs.strategy = sf - for _, ledger := range bs.partners { - ledger.Strategy = sf +func (bs *bitswap) ReceiveMessage( + ctx context.Context, sender *peer.Peer, incoming bsmsg.BitSwapMessage) ( + *peer.Peer, bsmsg.BitSwapMessage, error) { + if incoming.Blocks() != nil { + for _, block := range incoming.Blocks() { + go bs.blockReceive(sender, block) + } } + + if incoming.Wantlist() != nil { + for _, want := range incoming.Wantlist() { + // TODO(brian): return the block synchronously + go bs.peerWantsBlock(sender, want) + } + } + return nil, nil, errors.New("TODO implement") +} + +func numBytes(b blocks.Block) int { + return len(b.Data) } diff --git a/bitswap/interface.go b/bitswap/interface.go new file mode 100644 index 00000000000..73c3ba60337 --- /dev/null +++ b/bitswap/interface.go @@ -0,0 +1,28 @@ +package bitswap + +import ( + "time" + + blocks "github.com/jbenet/go-ipfs/blocks" + peer "github.com/jbenet/go-ipfs/peer" + u "github.com/jbenet/go-ipfs/util" +) + +type Exchange interface { + + // Block returns the block associated with a given key. + // TODO(brian): pass a context instead of a timeout + Block(k u.Key, timeout time.Duration) (*blocks.Block, error) + + // HasBlock asserts the existence of this block + // TODO(brian): rename -> HasBlock + // TODO(brian): accept a value, not a pointer + // TODO(brian): remove error return value. 
Should callers be concerned with + // whether the block was made available on the network? + HasBlock(blocks.Block) error +} + +type Directory interface { + FindProvidersAsync(u.Key, int, time.Duration) <-chan *peer.Peer + Provide(key u.Key) error +} diff --git a/bitswap/ledger.go b/bitswap/ledger.go index 6ddc0a71107..37731ebf89f 100644 --- a/bitswap/ledger.go +++ b/bitswap/ledger.go @@ -8,8 +8,8 @@ import ( u "github.com/jbenet/go-ipfs/util" ) -// Ledger stores the data exchange relationship between two peers. -type Ledger struct { +// ledger stores the data exchange relationship between two peers. +type ledger struct { lock sync.RWMutex // Partner is the remote Peer. @@ -30,20 +30,20 @@ type Ledger struct { // wantList is a (bounded, small) set of keys that Partner desires. wantList KeySet - Strategy StrategyFunc + Strategy strategyFunc } // LedgerMap lists Ledgers by their Partner key. -type LedgerMap map[u.Key]*Ledger +type ledgerMap map[u.Key]*ledger -func (l *Ledger) ShouldSend() bool { +func (l *ledger) ShouldSend() bool { l.lock.Lock() defer l.lock.Unlock() return l.Strategy(l) } -func (l *Ledger) SentBytes(n int) { +func (l *ledger) SentBytes(n int) { l.lock.Lock() defer l.lock.Unlock() @@ -52,7 +52,7 @@ func (l *Ledger) SentBytes(n int) { l.Accounting.BytesSent += uint64(n) } -func (l *Ledger) ReceivedBytes(n int) { +func (l *ledger) ReceivedBytes(n int) { l.lock.Lock() defer l.lock.Unlock() @@ -62,14 +62,14 @@ func (l *Ledger) ReceivedBytes(n int) { } // TODO: this needs to be different. We need timeouts. 
-func (l *Ledger) Wants(k u.Key) { +func (l *ledger) Wants(k u.Key) { l.lock.Lock() defer l.lock.Unlock() l.wantList[k] = struct{}{} } -func (l *Ledger) WantListContains(k u.Key) bool { +func (l *ledger) WantListContains(k u.Key) bool { l.lock.RLock() defer l.lock.RUnlock() @@ -77,7 +77,7 @@ func (l *Ledger) WantListContains(k u.Key) bool { return ok } -func (l *Ledger) ExchangeCount() uint64 { +func (l *ledger) ExchangeCount() uint64 { l.lock.RLock() defer l.lock.RUnlock() return l.exchangeCount diff --git a/bitswap/ledger_test.go b/bitswap/ledger_test.go index d651d485ff7..b2bf9ee5f70 100644 --- a/bitswap/ledger_test.go +++ b/bitswap/ledger_test.go @@ -7,7 +7,7 @@ import ( func TestRaceConditions(t *testing.T) { const numberOfExpectedExchanges = 10000 - l := new(Ledger) + l := new(ledger) var wg sync.WaitGroup for i := 0; i < numberOfExpectedExchanges; i++ { wg.Add(1) diff --git a/bitswap/message.go b/bitswap/message.go deleted file mode 100644 index 94bb82ef89b..00000000000 --- a/bitswap/message.go +++ /dev/null @@ -1,38 +0,0 @@ -package bitswap - -import ( - blocks "github.com/jbenet/go-ipfs/blocks" - peer "github.com/jbenet/go-ipfs/peer" - swarm "github.com/jbenet/go-ipfs/swarm" - u "github.com/jbenet/go-ipfs/util" -) - -// message wraps a proto message for convenience -type message struct { - pb PBMessage -} - -func newMessageFromProto(pb PBMessage) *message { - return &message{pb: pb} -} - -func newMessage() *message { - return new(message) -} - -func (m *message) AppendWanted(k u.Key) { - m.pb.Wantlist = append(m.pb.Wantlist, string(k)) -} - -func (m *message) AppendBlock(b *blocks.Block) { - m.pb.Blocks = append(m.pb.Blocks, b.Data) -} - -func (m *message) ToProto() *PBMessage { - cp := m.pb - return &cp -} - -func (m *message) ToSwarm(p *peer.Peer) *swarm.Message { - return swarm.NewMessage(p, m.ToProto()) -} diff --git a/bitswap/message/Makefile b/bitswap/message/Makefile new file mode 100644 index 00000000000..5bbebea075a --- /dev/null +++ 
b/bitswap/message/Makefile @@ -0,0 +1,8 @@ +# TODO(brian): add proto tasks +all: message.pb.go + +message.pb.go: message.proto + protoc --gogo_out=. --proto_path=../../../../../:/usr/local/opt/protobuf/include:. $< + +clean: + rm message.pb.go diff --git a/bitswap/message/message.go b/bitswap/message/message.go new file mode 100644 index 00000000000..dc65063137d --- /dev/null +++ b/bitswap/message/message.go @@ -0,0 +1,81 @@ +package message + +import ( + "errors" + + netmsg "github.com/jbenet/go-ipfs/net/message" + + blocks "github.com/jbenet/go-ipfs/blocks" + nm "github.com/jbenet/go-ipfs/net/message" + peer "github.com/jbenet/go-ipfs/peer" + u "github.com/jbenet/go-ipfs/util" +) + +type BitSwapMessage interface { + Wantlist() []u.Key + Blocks() []blocks.Block + AppendWanted(k u.Key) + AppendBlock(b blocks.Block) + Exportable +} + +type Exportable interface { + ToProto() *PBMessage + ToNet(p *peer.Peer) (nm.NetMessage, error) +} + +// message wraps a proto message for convenience +type message struct { + pb PBMessage +} + +func newMessageFromProto(pb PBMessage) *message { + return &message{pb: pb} +} + +func New() *message { + return new(message) +} + +// TODO(brian): convert these into keys +func (m *message) Wantlist() []u.Key { + wl := make([]u.Key, len(m.pb.Wantlist)) + for _, str := range m.pb.Wantlist { + wl = append(wl, u.Key(str)) + } + return wl +} + +// TODO(brian): convert these into blocks +func (m *message) Blocks() []blocks.Block { + bs := make([]blocks.Block, len(m.pb.Blocks)) + for _, data := range m.pb.Blocks { + b, err := blocks.NewBlock(data) + if err != nil { + continue + } + bs = append(bs, *b) + } + return bs +} + +func (m *message) AppendWanted(k u.Key) { + m.pb.Wantlist = append(m.pb.Wantlist, string(k)) +} + +func (m *message) AppendBlock(b blocks.Block) { + m.pb.Blocks = append(m.pb.Blocks, b.Data) +} + +func FromNet(nmsg netmsg.NetMessage) (BitSwapMessage, error) { + return nil, errors.New("TODO implement") +} + +func (m *message) 
ToProto() *PBMessage { + cp := m.pb + return &cp +} + +func (m *message) ToNet(p *peer.Peer) (nm.NetMessage, error) { + return nm.FromObject(p, m.ToProto()) +} diff --git a/bitswap/message.pb.go b/bitswap/message/message.pb.go similarity index 98% rename from bitswap/message.pb.go rename to bitswap/message/message.pb.go index a340ca0733d..d1089f5c94a 100644 --- a/bitswap/message.pb.go +++ b/bitswap/message/message.pb.go @@ -11,7 +11,7 @@ It is generated from these files: It has these top-level messages: PBMessage */ -package bitswap +package message import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" import math "math" diff --git a/bitswap/message.proto b/bitswap/message/message.proto similarity index 82% rename from bitswap/message.proto rename to bitswap/message/message.proto index b025ac3c3c7..a0e4d19972c 100644 --- a/bitswap/message.proto +++ b/bitswap/message/message.proto @@ -1,4 +1,4 @@ -package bitswap; +package message; message PBMessage { repeated string wantlist = 1; diff --git a/bitswap/message_test.go b/bitswap/message/message_test.go similarity index 86% rename from bitswap/message_test.go rename to bitswap/message/message_test.go index bc52b5aa9ed..8ff345f1cc6 100644 --- a/bitswap/message_test.go +++ b/bitswap/message/message_test.go @@ -1,16 +1,16 @@ -package bitswap +package message import ( "bytes" "testing" - blocks "github.com/jbenet/go-ipfs/blocks" u "github.com/jbenet/go-ipfs/util" + testutil "github.com/jbenet/go-ipfs/util/testutil" ) func TestAppendWanted(t *testing.T) { const str = "foo" - m := newMessage() + m := New() m.AppendWanted(u.Key(str)) if !contains(m.ToProto().GetWantlist(), str) { @@ -37,12 +37,9 @@ func TestAppendBlock(t *testing.T) { strs = append(strs, "Celeritas") strs = append(strs, "Incendia") - m := newMessage() + m := New() for _, str := range strs { - block, err := blocks.NewBlock([]byte(str)) - if err != nil { - t.Fail() - } + block := testutil.NewBlockOrFail(t, str) 
m.AppendBlock(block) } @@ -57,7 +54,7 @@ func TestAppendBlock(t *testing.T) { func TestCopyProtoByValue(t *testing.T) { const str = "foo" - m := newMessage() + m := New() protoBeforeAppend := m.ToProto() m.AppendWanted(u.Key(str)) if contains(protoBeforeAppend.GetWantlist(), str) { diff --git a/bitswap/network/forwarder.go b/bitswap/network/forwarder.go new file mode 100644 index 00000000000..f4eba0c1459 --- /dev/null +++ b/bitswap/network/forwarder.go @@ -0,0 +1,28 @@ +package network + +import ( + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + bsmsg "github.com/jbenet/go-ipfs/bitswap/message" + peer "github.com/jbenet/go-ipfs/peer" +) + +// Forwarder receives messages and forwards them to the delegate. +// +// Forwarder breaks the circular dependency between the BitSwap Session and the +// Network Service. +type Forwarder struct { + delegate Receiver +} + +func (r *Forwarder) ReceiveMessage( + ctx context.Context, sender *peer.Peer, incoming bsmsg.BitSwapMessage) ( + *peer.Peer, bsmsg.BitSwapMessage, error) { + if r.delegate == nil { + return nil, nil, nil + } + return r.delegate.ReceiveMessage(ctx, sender, incoming) +} + +func (r *Forwarder) Delegate(delegate Receiver) { + r.delegate = delegate +} diff --git a/bitswap/network/forwarder_test.go b/bitswap/network/forwarder_test.go new file mode 100644 index 00000000000..accc2c781f1 --- /dev/null +++ b/bitswap/network/forwarder_test.go @@ -0,0 +1,26 @@ +package network + +import ( + "testing" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + bsmsg "github.com/jbenet/go-ipfs/bitswap/message" + peer "github.com/jbenet/go-ipfs/peer" +) + +func TestDoesntPanicIfDelegateNotPresent(t *testing.T) { + fwdr := Forwarder{} + fwdr.ReceiveMessage(context.Background(), &peer.Peer{}, bsmsg.New()) +} + +func TestForwardsMessageToDelegate(t *testing.T) { + fwdr := Forwarder{delegate: &EchoDelegate{}} + 
fwdr.ReceiveMessage(context.Background(), &peer.Peer{}, bsmsg.New()) +} + +type EchoDelegate struct{} + +func (d *EchoDelegate) ReceiveMessage(ctx context.Context, p *peer.Peer, + incoming bsmsg.BitSwapMessage) (*peer.Peer, bsmsg.BitSwapMessage, error) { + return p, incoming, nil +} diff --git a/bitswap/network/interface.go b/bitswap/network/interface.go new file mode 100644 index 00000000000..82f30c5c2ba --- /dev/null +++ b/bitswap/network/interface.go @@ -0,0 +1,51 @@ +package network + +import ( + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + netservice "github.com/jbenet/go-ipfs/net/service" + + bsmsg "github.com/jbenet/go-ipfs/bitswap/message" + netmsg "github.com/jbenet/go-ipfs/net/message" + peer "github.com/jbenet/go-ipfs/peer" +) + +// NetAdapter mediates the exchange's communication with the network. +type NetAdapter interface { + + // SendMessage sends a BitSwap message to a peer. + SendMessage( + context.Context, + *peer.Peer, + bsmsg.BitSwapMessage) error + + // SendRequest sends a BitSwap message to a peer and waits for a response. + SendRequest( + context.Context, + *peer.Peer, + bsmsg.BitSwapMessage) (incoming bsmsg.BitSwapMessage, err error) + + // HandleMessage marshals and unmarshals net messages, forwarding them to the + // BitSwapMessage receiver + HandleMessage( + ctx context.Context, + incoming netmsg.NetMessage) (netmsg.NetMessage, error) + + // SetDelegate registers the Receiver to handle messages received from the + // network.
+ SetDelegate(Receiver) +} + + +//Receiver gets the bitswap message from the sender and outputs the destination for it +type Receiver interface { + ReceiveMessage( + ctx context.Context, sender *peer.Peer, incoming bsmsg.BitSwapMessage) ( + destination *peer.Peer, outgoing bsmsg.BitSwapMessage, err error) +} + +// TODO(brian): move this to go-ipfs/net package +type NetService interface { + SendRequest(ctx context.Context, m netmsg.NetMessage) (netmsg.NetMessage, error) + SendMessage(ctx context.Context, m netmsg.NetMessage) error + SetHandler(netservice.Handler) +} diff --git a/bitswap/network/network_adapter.go b/bitswap/network/network_adapter.go new file mode 100644 index 00000000000..0f77bbcdb1e --- /dev/null +++ b/bitswap/network/network_adapter.go @@ -0,0 +1,94 @@ +package network + +import ( + "errors" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + + bsmsg "github.com/jbenet/go-ipfs/bitswap/message" + netmsg "github.com/jbenet/go-ipfs/net/message" + peer "github.com/jbenet/go-ipfs/peer" +) + +// networkAdapter implements NetworkAdapter +type networkAdapter struct { + networkService NetService + receiver Receiver +} + +// NewNetworkAdapter wraps a network Service and Receiver to perform translation between +// BitSwapMessage and NetMessage formats. This allows the BitSwap session to +// ignore these details. +func NewNetworkAdapter(s NetworkService, r Receiver) NetAdapter { + adapter := networkAdapter{ + networkService: s, + receiver: r, + } + s.SetHandler(&adapter) + return &adapter +} + +// HandleMessage marshals and unmarshals net messages, forwarding them to the +// BitSwapMessage receiver +func (adapter *networkAdapter) HandleMessage( + ctx context.Context, incoming netmsg.NetMessage) (netmsg.NetMessage, error) { + + if adapter.receiver == nil { + return nil, errors.New("No receiver. 
NetMessage dropped") + } + + received, err := bsmsg.FromNet(incoming) + if err != nil { + return nil, err + } + + p, bsmsg, err := adapter.receiver.ReceiveMessage(ctx, incoming.Peer(), received) + if err != nil { + return nil, err + } + + // TODO(brian): put this in a helper function + if bsmsg == nil || p == nil { + return nil, nil + } + + outgoing, err := bsmsg.ToNet(p) + if err != nil { + return nil, err + } + + return outgoing, nil +} + +func (adapter *networkAdapter) SendMessage( + ctx context.Context, + p *peer.Peer, + outgoing bsmsg.BitSwapMessage) error { + + nmsg, err := outgoing.ToNet(p) + if err != nil { + return err + } + return adapter.networkService.SendMessage(ctx, nmsg) +} + +func (adapter *networkAdapter) SendRequest( + ctx context.Context, + p *peer.Peer, + outgoing bsmsg.BitSwapMessage) (bsmsg.BitSwapMessage, error) { + + outgoingMsg, err := outgoing.ToNet(p) + if err != nil { + return nil, err + } + incomingMsg, err := adapter.networkService.SendRequest(ctx, outgoingMsg) + if err != nil { + return nil, err + } + return bsmsg.FromNet(incomingMsg) +} + +func (adapter *networkAdapter) SetDelegate(r Receiver) { + adapter.receiver = r + +} diff --git a/bitswap/network/network_adapter_test.go b/bitswap/network/network_adapter_test.go new file mode 100644 index 00000000000..aa245203202 --- /dev/null +++ b/bitswap/network/network_adapter_test.go @@ -0,0 +1,113 @@ +package network + +import ( + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" + bsmsg "github.com/jbenet/go-ipfs/bitswap/message" + ci "github.com/jbenet/go-ipfs/crypto" + spipe "github.com/jbenet/go-ipfs/crypto/spipe" + msg "github.com/jbenet/go-ipfs/net/message" + netmsg 
"github.com/jbenet/go-ipfs/net/message" + mux "github.com/jbenet/go-ipfs/net/mux" + netservice "github.com/jbenet/go-ipfs/net/service" + peer "github.com/jbenet/go-ipfs/peer" + "testing" +) + +type TestProtocol struct { + *msg.Pipe +} + +type S struct{} +type R struct{} + +func (s *S) SendMessage(ctx context.Context, m netmsg.NetMessage) error { + return nil +} +func (s *S) SendRequest(ctx context.Context, m netmsg.NetMessage) (netmsg.NetMessage, error) { + return nil, nil +} +func (s *S) SetHandler(netservice.Handler) {} +func (r *R) ReceiveMessage(ctx context.Context, sender *peer.Peer, incoming bsmsg.BitSwapMessage) ( + destination *peer.Peer, outgoing bsmsg.BitSwapMessage, err error) { + return nil, nil, nil +} + +func newPeer(t *testing.T, id string) *peer.Peer { + mh, err := mh.FromHexString(id) + if err != nil { + t.Error(err) + return nil + } + return &peer.Peer{ID: peer.ID(mh)} +} + +func wrapData(data []byte, pid mux.ProtocolID) ([]byte, error) { + // Marshal + pbm := new(mux.PBProtocolMessage) + pbm.ProtocolID = &pid + pbm.Data = data + b, err := proto.Marshal(pbm) + if err != nil { + return nil, err + } + + return b, nil +} + +func makePeer(addr *ma.Multiaddr) *peer.Peer { + p := new(peer.Peer) + p.AddAddress(addr) + sk, pk, err := ci.GenerateKeyPair(ci.RSA, 512) + if err != nil { + panic(err) + } + p.PrivKey = sk + p.PubKey = pk + id, err := spipe.IDFromPubKey(pk) + if err != nil { + panic(err) + } + + p.ID = id + return p +} + +func TestNetworkAdapter(t *testing.T) { + + s := &S{} + r := &R{} + netAdapter := NewNetworkAdapter(s, r) + + //test for Handle Message + var x = "foo" + pid1 := mux.ProtocolID_Test + d, _ := wrapData([]byte(x), pid1) + peer1 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275aaaaaa") + m2 := msg.New(peer1, d) + ctx := context.Background() + _, errHandle := netAdapter.HandleMessage(ctx, m2) + if errHandle != nil { + //Dependent on the brian's TODO method being implemented, failing otherwise + // t.Error(errHandle) + } + + 
//test for Send Message + addrA, _ := ma.NewMultiaddr("/ip4/127.0.0.1/tcp/2222") + peerA := makePeer(addrA) + message := bsmsg.New() + errSend := netAdapter.SendMessage(ctx, peerA, message) + if errSend != nil { + t.Error(errSend) + } + + //test for send Request + _, errRequest := netAdapter.SendRequest(ctx, peerA, message) + if errRequest != nil { + //Dependent on the brian's TODO method being implemented, failing otherwise + // t.Error(errRequest) + } + +} diff --git a/bitswap/notifications/notifications.go b/bitswap/notifications/notifications.go new file mode 100644 index 00000000000..2da2b7fadcb --- /dev/null +++ b/bitswap/notifications/notifications.go @@ -0,0 +1,55 @@ +package notifications + +import ( + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + pubsub "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/tuxychandru/pubsub" + + blocks "github.com/jbenet/go-ipfs/blocks" + u "github.com/jbenet/go-ipfs/util" +) + +type PubSub interface { + Publish(block blocks.Block) + Subscribe(ctx context.Context, k u.Key) <-chan blocks.Block + Shutdown() +} + +func New() PubSub { + const bufferSize = 16 + return &impl{*pubsub.New(bufferSize)} +} + +type impl struct { + wrapped pubsub.PubSub +} + +func (ps *impl) Publish(block blocks.Block) { + topic := string(block.Key()) + ps.wrapped.Pub(block, topic) +} + +// Subscribe returns a one-time use |blockChannel|. |blockChannel| returns nil +// if the |ctx| times out or is cancelled. Then channel is closed after the +// block given by |k| is sent. 
+func (ps *impl) Subscribe(ctx context.Context, k u.Key) <-chan blocks.Block { + topic := string(k) + subChan := ps.wrapped.SubOnce(topic) + blockChannel := make(chan blocks.Block) + go func() { + defer close(blockChannel) + select { + case val := <-subChan: + block, ok := val.(blocks.Block) + if ok { + blockChannel <- block + } + case <-ctx.Done(): + ps.wrapped.Unsub(subChan, topic) + } + }() + return blockChannel +} + +func (ps *impl) Shutdown() { + ps.wrapped.Shutdown() +} diff --git a/bitswap/notifications/notifications_test.go b/bitswap/notifications/notifications_test.go new file mode 100644 index 00000000000..b12cc7d83ad --- /dev/null +++ b/bitswap/notifications/notifications_test.go @@ -0,0 +1,58 @@ +package notifications + +import ( + "bytes" + "testing" + "time" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + testutil "github.com/jbenet/go-ipfs/util/testutil" + + blocks "github.com/jbenet/go-ipfs/blocks" +) + +func TestPublishSubscribe(t *testing.T) { + blockSent := testutil.NewBlockOrFail(t, "Greetings from The Interval") + + n := New() + defer n.Shutdown() + ch := n.Subscribe(context.Background(), blockSent.Key()) + + n.Publish(blockSent) + blockRecvd, ok := <-ch + if !ok { + t.Fail() + } + + assertBlocksEqual(t, blockRecvd, blockSent) + +} + +func TestCarryOnWhenDeadlineExpires(t *testing.T) { + + impossibleDeadline := time.Nanosecond + fastExpiringCtx, _ := context.WithTimeout(context.Background(), impossibleDeadline) + + n := New() + defer n.Shutdown() + block := testutil.NewBlockOrFail(t, "A Missed Connection") + blockChannel := n.Subscribe(fastExpiringCtx, block.Key()) + + assertBlockChannelNil(t, blockChannel) +} + +func assertBlockChannelNil(t *testing.T, blockChannel <-chan blocks.Block) { + _, ok := <-blockChannel + if ok { + t.Fail() + } +} + +func assertBlocksEqual(t *testing.T, a, b blocks.Block) { + if !bytes.Equal(a.Data, b.Data) { + t.Fail() + } + if a.Key() != b.Key() { + t.Fail() + } +} 
diff --git a/bitswap/offline.go b/bitswap/offline.go new file mode 100644 index 00000000000..d1c0fea148c --- /dev/null +++ b/bitswap/offline.go @@ -0,0 +1,30 @@ +package bitswap + +import ( + "errors" + "time" + + blocks "github.com/jbenet/go-ipfs/blocks" + u "github.com/jbenet/go-ipfs/util" +) + +func NewOfflineExchange() Exchange { + return &offlineExchange{} +} + +// offlineExchange implements the Exchange interface but doesn't return blocks. +// For use in offline mode. +type offlineExchange struct { +} + +// Block returns nil to signal that a block could not be retrieved for the +// given key. +// NB: This function may return before the timeout expires. +func (_ *offlineExchange) Block(k u.Key, timeout time.Duration) (*blocks.Block, error) { + return nil, errors.New("Block unavailable. Operating in offline mode") +} + +// HasBlock always returns nil. +func (_ *offlineExchange) HasBlock(blocks.Block) error { + return nil +} diff --git a/bitswap/offline_test.go b/bitswap/offline_test.go new file mode 100644 index 00000000000..2b40ac5e288 --- /dev/null +++ b/bitswap/offline_test.go @@ -0,0 +1,27 @@ +package bitswap + +import ( + "testing" + "time" + + u "github.com/jbenet/go-ipfs/util" + testutil "github.com/jbenet/go-ipfs/util/testutil" +) + +func TestBlockReturnsErr(t *testing.T) { + off := NewOfflineExchange() + _, err := off.Block(u.Key("foo"), time.Second) + if err != nil { + return // as desired + } + t.Fail() +} + +func TestHasBlockReturnsNil(t *testing.T) { + off := NewOfflineExchange() + block := testutil.NewBlockOrFail(t, "data") + err := off.HasBlock(block) + if err != nil { + t.Fatal("") + } +} diff --git a/bitswap/strategy.go b/bitswap/strategy.go index c216a35c3ad..a2c2db18673 100644 --- a/bitswap/strategy.go +++ b/bitswap/strategy.go @@ -5,13 +5,13 @@ import ( "math/rand" ) -type StrategyFunc func(*Ledger) bool +type strategyFunc func(*ledger) bool -func StandardStrategy(l *Ledger) bool { +func standardStrategy(l *ledger) bool { return 
rand.Float64() <= probabilitySend(l.Accounting.Value()) } -func YesManStrategy(l *Ledger) bool { +func yesManStrategy(l *ledger) bool { return true } diff --git a/blockservice/blockservice.go b/blockservice/blockservice.go index 8f923c76bb1..0b4f15b9878 100644 --- a/blockservice/blockservice.go +++ b/blockservice/blockservice.go @@ -16,11 +16,11 @@ import ( // It uses an internal `datastore.Datastore` instance to store values. type BlockService struct { Datastore ds.Datastore - Remote *bitswap.BitSwap + Remote bitswap.Exchange } // NewBlockService creates a BlockService with given datastore instance. -func NewBlockService(d ds.Datastore, rem *bitswap.BitSwap) (*BlockService, error) { +func NewBlockService(d ds.Datastore, rem bitswap.Exchange) (*BlockService, error) { if d == nil { return nil, fmt.Errorf("BlockService requires valid datastore") } @@ -35,12 +35,14 @@ func (s *BlockService) AddBlock(b *blocks.Block) (u.Key, error) { k := b.Key() dsk := ds.NewKey(string(k)) u.DOut("storing [%s] in datastore\n", k.Pretty()) + // TODO(brian): define a block datastore with a Put method which accepts a + // block parameter err := s.Datastore.Put(dsk, b.Data) if err != nil { return k, err } if s.Remote != nil { - err = s.Remote.HaveBlock(b) + err = s.Remote.HasBlock(*b) } return k, err } @@ -63,7 +65,7 @@ func (s *BlockService) GetBlock(k u.Key) (*blocks.Block, error) { }, nil } else if err == ds.ErrNotFound && s.Remote != nil { u.DOut("Blockservice: Searching bitswap.\n") - blk, err := s.Remote.GetBlock(k, time.Second*5) + blk, err := s.Remote.Block(k, time.Second*5) if err != nil { return nil, err } diff --git a/blockstore/blockstore.go b/blockstore/blockstore.go new file mode 100644 index 00000000000..a4fc1f65d85 --- /dev/null +++ b/blockstore/blockstore.go @@ -0,0 +1,47 @@ +package blockstore + +import ( + "errors" + + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + + blocks "github.com/jbenet/go-ipfs/blocks" + u 
"github.com/jbenet/go-ipfs/util" +) + +var ValueTypeMismatch = errors.New("The retrieved value is not a Block") + +type Blockstore interface { + Get(u.Key) (*blocks.Block, error) + Put(blocks.Block) error +} + +func NewBlockstore(d ds.Datastore) Blockstore { + return &blockstore{ + datastore: d, + } +} + +type blockstore struct { + datastore ds.Datastore +} + +func (bs *blockstore) Get(k u.Key) (*blocks.Block, error) { + maybeData, err := bs.datastore.Get(toDatastoreKey(k)) + if err != nil { + return nil, err + } + bdata, ok := maybeData.([]byte) + if !ok { + return nil, ValueTypeMismatch + } + return blocks.NewBlock(bdata) +} + +func (bs *blockstore) Put(block blocks.Block) error { + return bs.datastore.Put(toDatastoreKey(block.Key()), block.Data) +} + +func toDatastoreKey(k u.Key) ds.Key { + return ds.NewKey(string(k)) +} diff --git a/blockstore/blockstore_test.go b/blockstore/blockstore_test.go new file mode 100644 index 00000000000..4b0909d7547 --- /dev/null +++ b/blockstore/blockstore_test.go @@ -0,0 +1,55 @@ +package blockstore + +import ( + "bytes" + "testing" + + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + u "github.com/jbenet/go-ipfs/util" + testutil "github.com/jbenet/go-ipfs/util/testutil" +) + +// TODO(brian): TestGetReturnsNil + +func TestGetWhenKeyNotPresent(t *testing.T) { + bs := NewBlockstore(ds.NewMapDatastore()) + _, err := bs.Get(u.Key("not present")) + + if err != nil { + t.Log("As expected, block is not present") + return + } + t.Fail() +} + +func TestPutThenGetBlock(t *testing.T) { + bs := NewBlockstore(ds.NewMapDatastore()) + block := testutil.NewBlockOrFail(t, "some data") + + err := bs.Put(block) + if err != nil { + t.Fatal(err) + } + + blockFromBlockstore, err := bs.Get(block.Key()) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(block.Data, blockFromBlockstore.Data) { + t.Fail() + } +} + +func TestValueTypeMismatch(t *testing.T) { + block := testutil.NewBlockOrFail(t, "some data") + + 
datastore := ds.NewMapDatastore() + datastore.Put(toDatastoreKey(block.Key()), "data that isn't a block!") + + blockstore := NewBlockstore(datastore) + + _, err := blockstore.Get(block.Key()) + if err != ValueTypeMismatch { + t.Fatal(err) + } +} diff --git a/cmd/ipfs/init.go b/cmd/ipfs/init.go index a70e0eb819b..5e404b081a4 100644 --- a/cmd/ipfs/init.go +++ b/cmd/ipfs/init.go @@ -9,7 +9,7 @@ import ( "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/commander" config "github.com/jbenet/go-ipfs/config" ci "github.com/jbenet/go-ipfs/crypto" - identify "github.com/jbenet/go-ipfs/identify" + spipe "github.com/jbenet/go-ipfs/crypto/spipe" u "github.com/jbenet/go-ipfs/util" ) @@ -72,7 +72,7 @@ func initCmd(c *commander.Command, inp []string) error { } cfg.Identity.PrivKey = base64.StdEncoding.EncodeToString(skbytes) - id, err := identify.IDFromPubKey(pk) + id, err := spipe.IDFromPubKey(pk) if err != nil { return err } diff --git a/core/core.go b/core/core.go index 0a9db055a9e..e82245c0f46 100644 --- a/core/core.go +++ b/core/core.go @@ -5,19 +5,23 @@ import ( "errors" "fmt" + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58" ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" - "github.com/jbenet/go-ipfs/bitswap" + + bitswap "github.com/jbenet/go-ipfs/bitswap" bserv "github.com/jbenet/go-ipfs/blockservice" config "github.com/jbenet/go-ipfs/config" ci "github.com/jbenet/go-ipfs/crypto" merkledag "github.com/jbenet/go-ipfs/merkledag" + inet "github.com/jbenet/go-ipfs/net" + mux "github.com/jbenet/go-ipfs/net/mux" + netservice "github.com/jbenet/go-ipfs/net/service" path "github.com/jbenet/go-ipfs/path" peer "github.com/jbenet/go-ipfs/peer" routing "github.com/jbenet/go-ipfs/routing" dht 
"github.com/jbenet/go-ipfs/routing/dht" - swarm "github.com/jbenet/go-ipfs/swarm" u "github.com/jbenet/go-ipfs/util" ) @@ -30,20 +34,20 @@ type IpfsNode struct { // the local node's identity Identity *peer.Peer - // the map of other nodes (Peer instances) - PeerMap *peer.Map + // storage for other Peer instances + Peerstore *peer.Peerstore // the local datastore Datastore ds.Datastore // the network message stream - Swarm *swarm.Swarm + Network inet.Network // the routing system. recommend ipfs-dht Routing routing.IpfsRouting // the block exchange + strategy (bitswap) - BitSwap *bitswap.BitSwap + BitSwap bitswap.BitSwap // the block service, get/add blocks. Blocks *bserv.BlockService @@ -56,6 +60,7 @@ type IpfsNode struct { // the name system, resolves paths to hashes // Namesys *namesys.Namesys + } // NewIpfsNode constructs a new IpfsNode based on the given config. @@ -74,30 +79,48 @@ func NewIpfsNode(cfg *config.Config, online bool) (*IpfsNode, error) { return nil, err } + peerstore := peer.NewPeerstore() + var ( - net *swarm.Swarm + net *inet.Network // TODO: refactor so we can use IpfsRouting interface instead of being DHT-specific - route* dht.IpfsDHT - swap *bitswap.BitSwap + route *dht.IpfsDHT + ) if online { - net = swarm.NewSwarm(local) - err = net.Listen() + // add protocol services here. + ctx := context.TODO() // derive this from a higher context. + + dhts := netservice.Service(nil) // nil handler for now, need to patch it + if err := dhts.Start(ctx); err != nil { + return nil, err + } + + net, err := inet.NewIpfsNetwork(context.TODO(), local, &mux.ProtocolMap{ + netservice.ProtocolID_Routing: dhtService, + // netservice.ProtocolID_Bitswap: bitswapService, + }) if err != nil { return nil, err } - route = dht.NewDHT(local, net, d) + route = dht.NewDHT(local, peerstore, net, dhts, d) + dhts.Handler = route // wire the handler to the service. 
+ + // TODO(brian): pass a context to DHT for its async operations route.Start() - swap = bitswap.NewBitSwap(local, net, d, route) - swap.SetStrategy(bitswap.YesManStrategy) + // TODO(brian): pass a context to bs for its async operations + bitswapSession := bitswap.NewSession(context.TODO(), local, d, route) + // TODO(brian): pass a context to initConnections go initConnections(cfg, route) } - bs, err := bserv.NewBlockService(d, swap) + // TODO(brian): when offline instantiate the BlockService with a bitswap + // session that simply doesn't return blocks + bs, err := bserv.NewBlockService(d, bitswapSession) if err != nil { return nil, err } @@ -106,12 +129,12 @@ func NewIpfsNode(cfg *config.Config, online bool) (*IpfsNode, error) { return &IpfsNode{ Config: cfg, - PeerMap: &peer.Map{}, + Peerstore: peerstore, Datastore: d, Blocks: bs, DAG: dag, Resolver: &path.Resolver{DAG: dag}, - BitSwap: swap, + BitSwap: bitswapSession, Identity: local, Routing: route, }, nil @@ -134,7 +157,7 @@ func initIdentity(cfg *config.Config) (*peer.Peer, error) { return nil, err } - addresses = []*ma.Multiaddr{ maddr } + addresses = []*ma.Multiaddr{maddr} } skb, err := base64.StdEncoding.DecodeString(cfg.Identity.PrivKey) diff --git a/crypto/spipe/Makefile b/crypto/spipe/Makefile new file mode 100644 index 00000000000..7e737b6d87f --- /dev/null +++ b/crypto/spipe/Makefile @@ -0,0 +1,8 @@ + +all: message.pb.go + +message.pb.go: message.proto + protoc --gogo_out=. --proto_path=../../../../../:/usr/local/opt/protobuf/include:. 
$< + +clean: + rm message.pb.go diff --git a/identify/identify.go b/crypto/spipe/handshake.go similarity index 58% rename from identify/identify.go rename to crypto/spipe/handshake.go index b6b8967809a..8c09aff7a1b 100644 --- a/identify/identify.go +++ b/crypto/spipe/handshake.go @@ -1,6 +1,6 @@ -// Package identify handles how peers identify with eachother upon -// connection to the network -package identify +// Package spipe handles establishing secure communication between two peers. + +package spipe import ( "bytes" @@ -22,85 +22,92 @@ import ( u "github.com/jbenet/go-ipfs/util" ) -// List of supported protocols--each section in order of preference. -// Takes the form: ECDH curves : Ciphers : Hashes +// List of supported ECDH curves var SupportedExchanges = "P-256,P-224,P-384,P-521" + +// List of supported Ciphers var SupportedCiphers = "AES-256,AES-128" + +// List of supported Hashes var SupportedHashes = "SHA256,SHA512,SHA1" // ErrUnsupportedKeyType is returned when a private key cast/type switch fails. var ErrUnsupportedKeyType = errors.New("unsupported key type") -// Performs initial communication with this peer to share node ID's and -// initiate communication. (secureIn, secureOut, error) -func Handshake(self, remote *peer.Peer, in <-chan []byte, out chan<- []byte) (<-chan []byte, chan<- []byte, error) { +// ErrClosed signals the closing of a connection. +var ErrClosed = errors.New("connection closed") + +// handshake performs initial communication over insecure channel to share +// keys, IDs, and initiate communication. +func (s *SecurePipe) handshake() error { // Generate and send Hello packet.
// Hello = (rand, PublicKey, Supported) nonce := make([]byte, 16) _, err := rand.Read(nonce) if err != nil { - return nil, nil, err + return err } - hello := new(Hello) - - myPubKey, err := self.PubKey.Bytes() + myPubKey, err := s.local.PubKey.Bytes() if err != nil { - return nil, nil, err + return err } - hello.Rand = nonce - hello.Pubkey = myPubKey - hello.Exchanges = &SupportedExchanges - hello.Ciphers = &SupportedCiphers - hello.Hashes = &SupportedHashes + proposeMsg := new(Propose) + proposeMsg.Rand = nonce + proposeMsg.Pubkey = myPubKey + proposeMsg.Exchanges = &SupportedExchanges + proposeMsg.Ciphers = &SupportedCiphers + proposeMsg.Hashes = &SupportedHashes - encoded, err := proto.Marshal(hello) + encoded, err := proto.Marshal(proposeMsg) if err != nil { - return nil, nil, err + return err } - out <- encoded + s.insecure.Out <- encoded - // Parse their Hello packet and generate an Exchange packet. + // Parse their Propose packet and generate an Exchange packet. // Exchange = (EphemeralPubKey, Signature) - resp := <-in + var resp []byte + select { + case <-s.ctx.Done(): + return ErrClosed + case resp = <-s.Duplex.In: + } - helloResp := new(Hello) - err = proto.Unmarshal(resp, helloResp) + proposeResp := new(Propose) + err = proto.Unmarshal(resp, proposeResp) if err != nil { - return nil, nil, err + return err } - remote.PubKey, err = ci.UnmarshalPublicKey(helloResp.GetPubkey()) + s.remote.PubKey, err = ci.UnmarshalPublicKey(proposeResp.GetPubkey()) if err != nil { - return nil, nil, err + return err } - remote.ID, err = IDFromPubKey(remote.PubKey) + s.remote.ID, err = IDFromPubKey(s.remote.PubKey) if err != nil { - return nil, nil, err + return err } - exchange, err := selectBest(SupportedExchanges, helloResp.GetExchanges()) + exchange, err := selectBest(SupportedExchanges, proposeResp.GetExchanges()) if err != nil { - return nil, nil, err + return err } - cipherType, err := selectBest(SupportedCiphers, helloResp.GetCiphers()) + cipherType, err := 
selectBest(SupportedCiphers, proposeResp.GetCiphers()) if err != nil { - return nil, nil, err + return err } - hashType, err := selectBest(SupportedHashes, helloResp.GetHashes()) + hashType, err := selectBest(SupportedHashes, proposeResp.GetHashes()) if err != nil { - return nil, nil, err + return err } epubkey, done, err := ci.GenerateEKeyPair(exchange) // Generate EphemeralPubKey - if err != nil { - return nil, nil, err - } var handshake bytes.Buffer // Gather corpus to sign. handshake.Write(encoded) @@ -110,77 +117,71 @@ func Handshake(self, remote *peer.Peer, in <-chan []byte, out chan<- []byte) (<- exPacket := new(Exchange) exPacket.Epubkey = epubkey - exPacket.Signature, err = self.PrivKey.Sign(handshake.Bytes()) + exPacket.Signature, err = s.local.PrivKey.Sign(handshake.Bytes()) if err != nil { - return nil, nil, err + return err } exEncoded, err := proto.Marshal(exPacket) - if err != nil { - return nil, nil, err - } - out <- exEncoded + s.insecure.Out <- exEncoded // Parse their Exchange packet and generate a Finish packet. 
// Finish = E('Finish') - resp1 := <-in + var resp1 []byte + select { + case <-s.ctx.Done(): + return ErrClosed + case resp1 = <-s.insecure.In: + } exchangeResp := new(Exchange) err = proto.Unmarshal(resp1, exchangeResp) if err != nil { - return nil, nil, err + return err } var theirHandshake bytes.Buffer - _, err = theirHandshake.Write(resp) - if err != nil { - return nil, nil, err - } - _, err = theirHandshake.Write(encoded) - if err != nil { - return nil, nil, err - } - _, err = theirHandshake.Write(exchangeResp.GetEpubkey()) - if err != nil { - return nil, nil, err - } + theirHandshake.Write(resp) + theirHandshake.Write(encoded) + theirHandshake.Write(exchangeResp.GetEpubkey()) - ok, err := remote.PubKey.Verify(theirHandshake.Bytes(), exchangeResp.GetSignature()) + ok, err := s.remote.PubKey.Verify(theirHandshake.Bytes(), exchangeResp.GetSignature()) if err != nil { - return nil, nil, err + return err } if !ok { - return nil, nil, errors.New("Bad signature!") + return errors.New("Bad signature!") } secret, err := done(exchangeResp.GetEpubkey()) if err != nil { - return nil, nil, err + return err } - cmp := bytes.Compare(myPubKey, helloResp.GetPubkey()) + cmp := bytes.Compare(myPubKey, proposeResp.GetPubkey()) mIV, tIV, mCKey, tCKey, mMKey, tMKey := ci.KeyStretcher(cmp, cipherType, hashType, secret) - secureIn := make(chan []byte) - secureOut := make(chan []byte) - - go secureInProxy(in, secureIn, hashType, tIV, tCKey, tMKey) - go secureOutProxy(out, secureOut, hashType, mIV, mCKey, mMKey) + go s.handleSecureIn(hashType, tIV, tCKey, tMKey) + go s.handleSecureOut(hashType, mIV, mCKey, mMKey) finished := []byte("Finished") - secureOut <- finished - resp2 := <-secureIn + s.Out <- finished + var resp2 []byte + select { + case <-s.ctx.Done(): + return ErrClosed + case resp2 = <-s.Duplex.In: + } if bytes.Compare(resp2, finished) != 0 { - return nil, nil, errors.New("Negotiation failed.") + return errors.New("Negotiation failed.") } - u.DOut("[%s] identify: Got node 
id: %s\n", self.ID.Pretty(), remote.ID.Pretty()) - - return secureIn, secureOut, nil + u.DOut("[%s] identify: Got node id: %s\n", s.local.ID.Pretty(), s.remote.ID.Pretty()) + return nil } func makeMac(hashType string, key []byte) (hash.Hash, int) { @@ -194,16 +195,15 @@ func makeMac(hashType string, key []byte) (hash.Hash, int) { } } -func secureInProxy(in <-chan []byte, secureIn chan<- []byte, hashType string, tIV, tCKey, tMKey []byte) { +func (s *SecurePipe) handleSecureIn(hashType string, tIV, tCKey, tMKey []byte) { theirBlock, _ := aes.NewCipher(tCKey) theirCipher := cipher.NewCTR(theirBlock, tIV) theirMac, macSize := makeMac(hashType, tMKey) for { - data, ok := <-in + data, ok := <-s.insecure.In if !ok { - close(secureIn) return } @@ -223,23 +223,22 @@ func secureInProxy(in <-chan []byte, secureIn chan<- []byte, hashType string, tI hmacOk := hmac.Equal(data[mark:], expected) if hmacOk { - secureIn <- buff + s.Duplex.In <- buff } else { - secureIn <- nil + s.Duplex.In <- nil } } } -func secureOutProxy(out chan<- []byte, secureOut <-chan []byte, hashType string, mIV, mCKey, mMKey []byte) { +func (s *SecurePipe) handleSecureOut(hashType string, mIV, mCKey, mMKey []byte) { myBlock, _ := aes.NewCipher(mCKey) myCipher := cipher.NewCTR(myBlock, mIV) myMac, macSize := makeMac(hashType, mMKey) for { - data, ok := <-secureOut + data, ok := <-s.Out if !ok { - close(out) return } @@ -255,11 +254,11 @@ func secureOutProxy(out chan<- []byte, secureOut <-chan []byte, hashType string, copy(buff[len(data):], myMac.Sum(nil)) myMac.Reset() - out <- buff + s.insecure.Out <- buff } } -// IDFromPubKey returns Nodes ID given its public key +// IDFromPubKey returns a node's peer.ID derived from its public key pk. func IDFromPubKey(pk ci.PubKey) (peer.ID, error) { b, err := pk.Bytes() if err != nil { diff --git a/identify/identify_test.go b/crypto/spipe/identify_test.go similarity index 97% rename from identify/identify_test.go rename to crypto/spipe/identify_test.go index 
3d529f3e48e..210d0cfcccf 100644 --- a/identify/identify_test.go +++ b/crypto/spipe/identify_test.go @@ -1,4 +1,4 @@ -package identify +package spipe import ( "testing" diff --git a/identify/message.pb.go b/crypto/spipe/message.pb.go similarity index 52% rename from identify/message.pb.go rename to crypto/spipe/message.pb.go index bd373c6e928..32392a57611 100644 --- a/identify/message.pb.go +++ b/crypto/spipe/message.pb.go @@ -1,68 +1,70 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-gogo. // source: message.proto // DO NOT EDIT! /* -Package identify is a generated protocol buffer package. +Package spipe is a generated protocol buffer package. It is generated from these files: message.proto It has these top-level messages: - Hello + Propose Exchange */ -package identify +package spipe -import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" +import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" +import json "encoding/json" import math "math" -// Reference imports to suppress errors if they are not otherwise used. +// Reference proto, json, and math imports to suppress error if they are not otherwise used. 
var _ = proto.Marshal +var _ = &json.SyntaxError{} var _ = math.Inf -type Hello struct { - Rand []byte `protobuf:"bytes,1,req,name=rand" json:"rand,omitempty"` - Pubkey []byte `protobuf:"bytes,2,req,name=pubkey" json:"pubkey,omitempty"` - Exchanges *string `protobuf:"bytes,3,req,name=exchanges" json:"exchanges,omitempty"` - Ciphers *string `protobuf:"bytes,4,req,name=ciphers" json:"ciphers,omitempty"` - Hashes *string `protobuf:"bytes,5,req,name=hashes" json:"hashes,omitempty"` +type Propose struct { + Rand []byte `protobuf:"bytes,1,opt,name=rand" json:"rand,omitempty"` + Pubkey []byte `protobuf:"bytes,2,opt,name=pubkey" json:"pubkey,omitempty"` + Exchanges *string `protobuf:"bytes,3,opt,name=exchanges" json:"exchanges,omitempty"` + Ciphers *string `protobuf:"bytes,4,opt,name=ciphers" json:"ciphers,omitempty"` + Hashes *string `protobuf:"bytes,5,opt,name=hashes" json:"hashes,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *Hello) Reset() { *m = Hello{} } -func (m *Hello) String() string { return proto.CompactTextString(m) } -func (*Hello) ProtoMessage() {} +func (m *Propose) Reset() { *m = Propose{} } +func (m *Propose) String() string { return proto.CompactTextString(m) } +func (*Propose) ProtoMessage() {} -func (m *Hello) GetRand() []byte { +func (m *Propose) GetRand() []byte { if m != nil { return m.Rand } return nil } -func (m *Hello) GetPubkey() []byte { +func (m *Propose) GetPubkey() []byte { if m != nil { return m.Pubkey } return nil } -func (m *Hello) GetExchanges() string { +func (m *Propose) GetExchanges() string { if m != nil && m.Exchanges != nil { return *m.Exchanges } return "" } -func (m *Hello) GetCiphers() string { +func (m *Propose) GetCiphers() string { if m != nil && m.Ciphers != nil { return *m.Ciphers } return "" } -func (m *Hello) GetHashes() string { +func (m *Propose) GetHashes() string { if m != nil && m.Hashes != nil { return *m.Hashes } @@ -70,8 +72,8 @@ func (m *Hello) GetHashes() string { } type Exchange struct { - Epubkey 
[]byte `protobuf:"bytes,1,req,name=epubkey" json:"epubkey,omitempty"` - Signature []byte `protobuf:"bytes,2,req,name=signature" json:"signature,omitempty"` + Epubkey []byte `protobuf:"bytes,1,opt,name=epubkey" json:"epubkey,omitempty"` + Signature []byte `protobuf:"bytes,2,opt,name=signature" json:"signature,omitempty"` XXX_unrecognized []byte `json:"-"` } diff --git a/crypto/spipe/message.proto b/crypto/spipe/message.proto new file mode 100644 index 00000000000..191dd0b806e --- /dev/null +++ b/crypto/spipe/message.proto @@ -0,0 +1,14 @@ +package spipe; + +message Propose { + optional bytes rand = 1; + optional bytes pubkey = 2; + optional string exchanges = 3; + optional string ciphers = 4; + optional string hashes = 5; +} + +message Exchange { + optional bytes epubkey = 1; + optional bytes signature = 2; +} diff --git a/crypto/spipe/pipe.go b/crypto/spipe/pipe.go new file mode 100644 index 00000000000..caa539275ac --- /dev/null +++ b/crypto/spipe/pipe.go @@ -0,0 +1,76 @@ +package spipe + +import ( + "errors" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + peer "github.com/jbenet/go-ipfs/peer" +) + +// Duplex is a simple duplex channel +type Duplex struct { + In chan []byte + Out chan []byte +} + +// SecurePipe objects represent a bi-directional message channel. +type SecurePipe struct { + Duplex + insecure Duplex + + local *peer.Peer + remote *peer.Peer + + params params + + ctx context.Context + cancel context.CancelFunc +} + +// options in a secure pipe +type params struct { +} + +// NewSecurePipe constructs a pipe with channels of a given buffer size. +func NewSecurePipe(ctx context.Context, bufsize int, local, + remote *peer.Peer) (*SecurePipe, error) { + + sp := &SecurePipe{ + Duplex: Duplex{ + In: make(chan []byte, bufsize), + Out: make(chan []byte, bufsize), + }, + local: local, + remote: remote, + } + return sp, nil +} + +// Wrap creates a secure connection on top of an insecure duplex channel. 
+func (s *SecurePipe) Wrap(ctx context.Context, insecure Duplex) error { + if s.ctx != nil { + return errors.New("Pipe in use") + } + + s.insecure = insecure + s.ctx, s.cancel = context.WithCancel(ctx) + + if err := s.handshake(); err != nil { + s.cancel() + return err + } + + return nil +} + +// Close closes the secure pipe +func (s *SecurePipe) Close() error { + if s.cancel == nil { + return errors.New("pipe already closed") + } + + s.cancel() + s.cancel = nil + close(s.In) + return nil +} diff --git a/daemon/daemon_test.go b/daemon/daemon_test.go new file mode 100644 index 00000000000..04cd6c2b7f1 --- /dev/null +++ b/daemon/daemon_test.go @@ -0,0 +1,65 @@ +package daemon + +import ( + "encoding/base64" + "testing" + + config "github.com/jbenet/go-ipfs/config" + core "github.com/jbenet/go-ipfs/core" + ci "github.com/jbenet/go-ipfs/crypto" + identify "github.com/jbenet/go-ipfs/identify" +) + +func TestInitializeDaemonListener(t *testing.T) { + + priv, pub, err := ci.GenerateKeyPair(ci.RSA, 512) + if err != nil { + t.Fatal(err) + } + prbytes, err := priv.Bytes() + if err != nil { + t.Fatal(err) + } + + ident, _ := identify.IDFromPubKey(pub) + privKey := base64.StdEncoding.EncodeToString(prbytes) + pID := ident.Pretty() + + id := &config.Identity{ + PeerID: pID, + Address: "/ip4/127.0.0.1/tcp/8000", + PrivKey: privKey, + } + + nodeConfigs := []*config.Config{ + &config.Config{ + Identity: id, + Datastore: config.Datastore{ + Type: "memory", + }, + }, + + &config.Config{ + Identity: id, + Datastore: config.Datastore{ + Type: "leveldb", + Path: ".testdb", + }, + }, + } + + for _, c := range nodeConfigs { + + node, _ := core.NewIpfsNode(c, false) + dl, initErr := NewDaemonListener(node, "localhost:1327") + if initErr != nil { + t.Fatal(initErr) + } + closeErr := dl.Close() + if closeErr != nil { + t.Fatal(closeErr) + } + + } + +} diff --git a/identify/message.proto b/identify/message.proto deleted file mode 100644 index 4c3e032e50f..00000000000 --- 
a/identify/message.proto +++ /dev/null @@ -1,14 +0,0 @@ -package identify; - -message Hello { - required bytes rand = 1; - required bytes pubkey = 2; - required string exchanges = 3; - required string ciphers = 4; - required string hashes = 5; -} - -message Exchange { - required bytes epubkey = 1; - required bytes signature = 2; -} diff --git a/swarm/conn.go b/net/conn/conn.go similarity index 66% rename from swarm/conn.go rename to net/conn/conn.go index 468e86cd2ad..4e6ded25854 100644 --- a/swarm/conn.go +++ b/net/conn/conn.go @@ -1,4 +1,4 @@ -package swarm +package conn import ( "fmt" @@ -6,6 +6,8 @@ import ( msgio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio" ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" + + spipe "github.com/jbenet/go-ipfs/crypto/spipe" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" ) @@ -25,12 +27,37 @@ type Conn struct { Closed chan bool Outgoing *msgio.Chan Incoming *msgio.Chan - secIn <-chan []byte - secOut chan<- []byte + Secure *spipe.SecurePipe +} + +// Map maps Keys (Peer.IDs) to Connections. +type Map map[u.Key]*Conn + +// NewConn constructs a new connection +func NewConn(peer *peer.Peer, addr *ma.Multiaddr, nconn net.Conn) (*Conn, error) { + conn := &Conn{ + Peer: peer, + Addr: addr, + Conn: nconn, + } + + if err := conn.newChans(); err != nil { + return nil, err + } + + return conn, nil } -// ConnMap maps Keys (Peer.IDs) to Connections. 
-type ConnMap map[u.Key]*Conn +// NewNetConn constructs a new connection with given net.Conn +func NewNetConn(nconn net.Conn) (*Conn, error) { + + addr, err := ma.FromNetAddr(nconn.RemoteAddr()) + if err != nil { + return nil, err + } + + return NewConn(new(peer.Peer), addr, nconn) +} // Dial connects to a particular peer, over a given network // Example: Dial("udp", peer) @@ -50,18 +77,11 @@ func Dial(network string, peer *peer.Peer) (*Conn, error) { return nil, err } - conn := &Conn{ - Peer: peer, - Addr: addr, - Conn: nconn, - } - - newConnChans(conn) - return conn, nil + return NewConn(peer, addr, nconn) } // Construct new channels for given Conn. -func newConnChans(c *Conn) error { +func (c *Conn) newChans() error { if c.Outgoing != nil || c.Incoming != nil { return fmt.Errorf("Conn already initialized") } @@ -77,18 +97,18 @@ func newConnChans(c *Conn) error { } // Close closes the connection, and associated channels. -func (s *Conn) Close() error { +func (c *Conn) Close() error { u.DOut("Closing Conn.\n") - if s.Conn == nil { + if c.Conn == nil { return fmt.Errorf("Already closed") // already closed } // closing net connection - err := s.Conn.Close() - s.Conn = nil + err := c.Conn.Close() + c.Conn = nil // closing channels - s.Incoming.Close() - s.Outgoing.Close() - s.Closed <- true + c.Incoming.Close() + c.Outgoing.Close() + c.Closed <- true return err } diff --git a/swarm/conn_test.go b/net/conn/conn_test.go similarity index 86% rename from swarm/conn_test.go rename to net/conn/conn_test.go index 952434acf0b..219004be8fb 100644 --- a/swarm/conn_test.go +++ b/net/conn/conn_test.go @@ -1,12 +1,13 @@ -package swarm +package conn import ( - "fmt" - ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" - mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" - peer "github.com/jbenet/go-ipfs/peer" "net" "testing" + + peer "github.com/jbenet/go-ipfs/peer" + + ma 
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" ) func setupPeer(id string, addr string) (*peer.Peer, error) { @@ -29,7 +30,7 @@ func echoListen(listener *net.TCPListener) { for { c, err := listener.Accept() if err == nil { - fmt.Println("accepeted") + // fmt.Println("accepeted") go echo(c) } } @@ -40,15 +41,15 @@ func echo(c net.Conn) { data := make([]byte, 1024) i, err := c.Read(data) if err != nil { - fmt.Printf("error %v\n", err) + // fmt.Printf("error %v\n", err) return } _, err = c.Write(data[:i]) if err != nil { - fmt.Printf("error %v\n", err) + // fmt.Printf("error %v\n", err) return } - fmt.Println("echoing", data[:i]) + // fmt.Println("echoing", data[:i]) } } @@ -70,11 +71,11 @@ func TestDial(t *testing.T) { t.Fatal("error dialing peer", err) } - fmt.Println("sending") + // fmt.Println("sending") c.Outgoing.MsgChan <- []byte("beep") c.Outgoing.MsgChan <- []byte("boop") out := <-c.Incoming.MsgChan - fmt.Println("recving", string(out)) + // fmt.Println("recving", string(out)) if string(out) != "beep" { t.Error("unexpected conn output") } @@ -84,7 +85,7 @@ func TestDial(t *testing.T) { t.Error("unexpected conn output") } - fmt.Println("closing") + // fmt.Println("closing") c.Close() listener.Close() } diff --git a/net/interface.go b/net/interface.go new file mode 100644 index 00000000000..85df4c8f1f4 --- /dev/null +++ b/net/interface.go @@ -0,0 +1,49 @@ +package net + +import ( + msg "github.com/jbenet/go-ipfs/net/message" + mux "github.com/jbenet/go-ipfs/net/mux" + srv "github.com/jbenet/go-ipfs/net/service" + peer "github.com/jbenet/go-ipfs/peer" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" +) + +// Network is the interface IPFS uses for connecting to the world. +type Network interface { + + // Listen handles incoming connections on given Multiaddr. 
+ // Listen(*ma.Muliaddr) error + // TODO: for now, only listen on addrs in local peer when initializing. + + // DialPeer attempts to establish a connection to a given peer + DialPeer(*peer.Peer) error + + // ClosePeer connection to peer + ClosePeer(*peer.Peer) error + + // IsConnected returns whether a connection to given peer exists. + IsConnected(*peer.Peer) (bool, error) + + // GetProtocols returns the protocols registered in the network. + GetProtocols() *mux.ProtocolMap + + // SendMessage sends given Message out + SendMessage(msg.NetMessage) error + + // Close terminates all network operation + Close() error +} + +// Sender interface for network services. +type Sender interface { + // SendMessage sends out a given message, without expecting a response. + SendMessage(ctx context.Context, m msg.NetMessage) error + + // SendRequest sends out a given message, and awaits a response. + // Set Deadlines or cancellations in the context.Context you pass in. + SendRequest(ctx context.Context, m msg.NetMessage) (msg.NetMessage, error) +} + +// Handler interface for network services. +type Handler srv.Handler diff --git a/net/message/message.go b/net/message/message.go new file mode 100644 index 00000000000..11053e423cc --- /dev/null +++ b/net/message/message.go @@ -0,0 +1,57 @@ +package message + +import ( + peer "github.com/jbenet/go-ipfs/peer" + + proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" +) + +type NetMessage interface { + Peer() *peer.Peer + Data() []byte +} + +func New(p *peer.Peer, data []byte) NetMessage { + return &message{peer: p, data: data} +} + +// message represents a packet of information sent to or received from a +// particular Peer. +type message struct { + // To or from, depending on direction. 
+ peer *peer.Peer + + // Opaque data + data []byte +} + +func (m *message) Peer() *peer.Peer { + return m.peer +} + +func (m *message) Data() []byte { + return m.data +} + +// FromObject creates a message from a protobuf-marshallable message. +func FromObject(p *peer.Peer, data proto.Message) (NetMessage, error) { + bytes, err := proto.Marshal(data) + if err != nil { + return nil, err + } + return New(p, bytes), nil +} + +// Pipe objects represent a bi-directional message channel. +type Pipe struct { + Incoming chan NetMessage + Outgoing chan NetMessage +} + +// NewPipe constructs a pipe with channels of a given buffer size. +func NewPipe(bufsize int) *Pipe { + return &Pipe{ + Incoming: make(chan NetMessage, bufsize), + Outgoing: make(chan NetMessage, bufsize), + } +} diff --git a/net/mux/Makefile b/net/mux/Makefile new file mode 100644 index 00000000000..615698949d0 --- /dev/null +++ b/net/mux/Makefile @@ -0,0 +1,8 @@ + +all: mux.pb.go + +mux.pb.go: mux.proto + protoc --gogo_out=. --proto_path=../../../../../:/usr/local/opt/protobuf/include:. $< + +clean: + rm mux.pb.go diff --git a/net/mux/mux.go b/net/mux/mux.go new file mode 100644 index 00000000000..e6cf0651fbe --- /dev/null +++ b/net/mux/mux.go @@ -0,0 +1,165 @@ +package mux + +import ( + "errors" + + msg "github.com/jbenet/go-ipfs/net/message" + u "github.com/jbenet/go-ipfs/util" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" +) + +// Protocol objects produce + consume raw data. They are added to the Muxer +// with a ProtocolID, which is added to outgoing payloads. Muxer properly +// encapsulates and decapsulates when interfacing with its Protocols. The +// Protocols do not encounter their ProtocolID. +type Protocol interface { + GetPipe() *msg.Pipe +} + +// ProtocolMap maps ProtocolIDs to Protocols. 
+type ProtocolMap map[ProtocolID]Protocol + +// Muxer is a simple multiplexor that reads + writes to Incoming and Outgoing +// channels. It multiplexes various protocols, wrapping and unwrapping data +// with a ProtocolID. +type Muxer struct { + // Protocols are the multiplexed services. + Protocols ProtocolMap + + // cancel is the function to stop the Muxer + cancel context.CancelFunc + + *msg.Pipe +} + +// GetPipe implements the Protocol interface +func (m *Muxer) GetPipe() *msg.Pipe { + return m.Pipe +} + +// Start kicks off the Muxer goroutines. +func (m *Muxer) Start(ctx context.Context) error { + if m.cancel != nil { + return errors.New("Muxer already started.") + } + + // make a cancellable context. + ctx, m.cancel = context.WithCancel(ctx) + + go m.handleIncomingMessages(ctx) + for pid, proto := range m.Protocols { + go m.handleOutgoingMessages(ctx, pid, proto) + } + + return nil +} + +// Stop stops muxer activity. +func (m *Muxer) Stop() { + m.cancel() + m.cancel = context.CancelFunc(nil) +} + +// AddProtocol adds a Protocol with given ProtocolID to the Muxer. +func (m *Muxer) AddProtocol(p Protocol, pid ProtocolID) error { + if _, found := m.Protocols[pid]; found { + return errors.New("Another protocol already using this ProtocolID") + } + + m.Protocols[pid] = p + return nil +} + +// handleIncoming consumes the messages on the m.Incoming channel and +// routes them appropriately (to the protocols). +func (m *Muxer) handleIncomingMessages(ctx context.Context) { + for { + select { + case msg := <-m.Incoming: + go m.handleIncomingMessage(ctx, msg) + + case <-ctx.Done(): + return + } + } +} + +// handleIncomingMessage routes message to the appropriate protocol. 
+func (m *Muxer) handleIncomingMessage(ctx context.Context, m1 msg.NetMessage) { + + data, pid, err := unwrapData(m1.Data()) + if err != nil { + u.PErr("muxer de-serializing error: %v\n", err) + return + } + + m2 := msg.New(m1.Peer(), data) + proto, found := m.Protocols[pid] + if !found { + u.PErr("muxer unknown protocol %v\n", pid) + return + } + + select { + case proto.GetPipe().Incoming <- m2: + case <-ctx.Done(): + u.PErr("%v\n", ctx.Err()) + return + } +} + +// handleOutgoingMessages consumes the messages on the proto.Outgoing channel, +// wraps them and sends them out. +func (m *Muxer) handleOutgoingMessages(ctx context.Context, pid ProtocolID, proto Protocol) { + for { + select { + case msg := <-proto.GetPipe().Outgoing: + go m.handleOutgoingMessage(ctx, pid, msg) + + case <-ctx.Done(): + return + } + } +} + +// handleOutgoingMessage wraps out a message and sends it out the +func (m *Muxer) handleOutgoingMessage(ctx context.Context, pid ProtocolID, m1 msg.NetMessage) { + data, err := wrapData(m1.Data(), pid) + if err != nil { + u.PErr("muxer serializing error: %v\n", err) + return + } + + m2 := msg.New(m1.Peer(), data) + select { + case m.GetPipe().Outgoing <- m2: + case <-ctx.Done(): + return + } +} + +func wrapData(data []byte, pid ProtocolID) ([]byte, error) { + // Marshal + pbm := new(PBProtocolMessage) + pbm.ProtocolID = &pid + pbm.Data = data + b, err := proto.Marshal(pbm) + if err != nil { + return nil, err + } + + return b, nil +} + +func unwrapData(data []byte) ([]byte, ProtocolID, error) { + // Unmarshal + pbm := new(PBProtocolMessage) + err := proto.Unmarshal(data, pbm) + if err != nil { + return nil, 0, err + } + + return pbm.GetData(), pbm.GetProtocolID(), nil +} diff --git a/net/mux/mux.pb.go b/net/mux/mux.pb.go new file mode 100644 index 00000000000..f4e95191527 --- /dev/null +++ b/net/mux/mux.pb.go @@ -0,0 +1,90 @@ +// Code generated by protoc-gen-gogo. +// source: mux.proto +// DO NOT EDIT! 
+ +/* +Package mux is a generated protocol buffer package. + +It is generated from these files: + mux.proto + +It has these top-level messages: + PBProtocolMessage +*/ +package mux + +import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" +import json "encoding/json" +import math "math" + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. +var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +type ProtocolID int32 + +const ( + ProtocolID_Test ProtocolID = 0 + ProtocolID_Identify ProtocolID = 1 + ProtocolID_Routing ProtocolID = 2 + ProtocolID_Exchange ProtocolID = 3 +) + +var ProtocolID_name = map[int32]string{ + 0: "Test", + 1: "Identify", + 2: "Routing", + 3: "Exchange", +} +var ProtocolID_value = map[string]int32{ + "Test": 0, + "Identify": 1, + "Routing": 2, + "Exchange": 3, +} + +func (x ProtocolID) Enum() *ProtocolID { + p := new(ProtocolID) + *p = x + return p +} +func (x ProtocolID) String() string { + return proto.EnumName(ProtocolID_name, int32(x)) +} +func (x *ProtocolID) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ProtocolID_value, data, "ProtocolID") + if err != nil { + return err + } + *x = ProtocolID(value) + return nil +} + +type PBProtocolMessage struct { + ProtocolID *ProtocolID `protobuf:"varint,1,req,enum=mux.ProtocolID" json:"ProtocolID,omitempty"` + Data []byte `protobuf:"bytes,2,req" json:"Data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PBProtocolMessage) Reset() { *m = PBProtocolMessage{} } +func (m *PBProtocolMessage) String() string { return proto.CompactTextString(m) } +func (*PBProtocolMessage) ProtoMessage() {} + +func (m *PBProtocolMessage) GetProtocolID() ProtocolID { + if m != nil && m.ProtocolID != nil { + return *m.ProtocolID + } + return ProtocolID_Test +} + +func (m *PBProtocolMessage) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func init() { + 
proto.RegisterEnum("mux.ProtocolID", ProtocolID_name, ProtocolID_value) +} diff --git a/net/mux/mux.proto b/net/mux/mux.proto new file mode 100644 index 00000000000..0883cb6553c --- /dev/null +++ b/net/mux/mux.proto @@ -0,0 +1,13 @@ +package mux; + +enum ProtocolID { + Test = 0; + Identify = 1; // setup + Routing = 2; // dht + Exchange = 3; // bitswap +} + +message PBProtocolMessage { + required ProtocolID ProtocolID = 1; + required bytes Data = 2; +} diff --git a/net/mux/mux_test.go b/net/mux/mux_test.go new file mode 100644 index 00000000000..d28c3aa6cb6 --- /dev/null +++ b/net/mux/mux_test.go @@ -0,0 +1,285 @@ +package mux + +import ( + "bytes" + "fmt" + "testing" + "time" + + msg "github.com/jbenet/go-ipfs/net/message" + peer "github.com/jbenet/go-ipfs/peer" + u "github.com/jbenet/go-ipfs/util" + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" +) + +type TestProtocol struct { + *msg.Pipe +} + +func (t *TestProtocol) GetPipe() *msg.Pipe { + return t.Pipe +} + +func newPeer(t *testing.T, id string) *peer.Peer { + mh, err := mh.FromHexString(id) + if err != nil { + t.Error(err) + return nil + } + + return &peer.Peer{ID: peer.ID(mh)} +} + +func testMsg(t *testing.T, m msg.NetMessage, data []byte) { + if !bytes.Equal(data, m.Data()) { + t.Errorf("Data does not match: %v != %v", data, m.Data()) + } +} + +func testWrappedMsg(t *testing.T, m msg.NetMessage, pid ProtocolID, data []byte) { + data2, pid2, err := unwrapData(m.Data()) + if err != nil { + t.Error(err) + } + + if pid != pid2 { + t.Errorf("ProtocolIDs do not match: %v != %v", pid, pid2) + } + + if !bytes.Equal(data, data2) { + t.Errorf("Data does not match: %v != %v", data, data2) + } +} + +func TestSimpleMuxer(t *testing.T) { + + // setup + p1 := &TestProtocol{Pipe: msg.NewPipe(10)} + p2 := &TestProtocol{Pipe: msg.NewPipe(10)} + pid1 := ProtocolID_Test + pid2 := ProtocolID_Routing 
+ mux1 := &Muxer{ + Pipe: msg.NewPipe(10), + Protocols: ProtocolMap{ + pid1: p1, + pid2: p2, + }, + } + peer1 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275aaaaaa") + // peer2 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275bbbbbb") + + // run muxer + ctx := context.Background() + mux1.Start(ctx) + + // test outgoing p1 + for _, s := range []string{"foo", "bar", "baz"} { + p1.Outgoing <- msg.New(peer1, []byte(s)) + testWrappedMsg(t, <-mux1.Outgoing, pid1, []byte(s)) + } + + // test incoming p1 + for _, s := range []string{"foo", "bar", "baz"} { + d, err := wrapData([]byte(s), pid1) + if err != nil { + t.Error(err) + } + mux1.Incoming <- msg.New(peer1, d) + testMsg(t, <-p1.Incoming, []byte(s)) + } + + // test outgoing p2 + for _, s := range []string{"foo", "bar", "baz"} { + p2.Outgoing <- msg.New(peer1, []byte(s)) + testWrappedMsg(t, <-mux1.Outgoing, pid2, []byte(s)) + } + + // test incoming p2 + for _, s := range []string{"foo", "bar", "baz"} { + d, err := wrapData([]byte(s), pid2) + if err != nil { + t.Error(err) + } + mux1.Incoming <- msg.New(peer1, d) + testMsg(t, <-p2.Incoming, []byte(s)) + } +} + +func TestSimultMuxer(t *testing.T) { + + // setup + p1 := &TestProtocol{Pipe: msg.NewPipe(10)} + p2 := &TestProtocol{Pipe: msg.NewPipe(10)} + pid1 := ProtocolID_Test + pid2 := ProtocolID_Identify + mux1 := &Muxer{ + Pipe: msg.NewPipe(10), + Protocols: ProtocolMap{ + pid1: p1, + pid2: p2, + }, + } + peer1 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275aaaaaa") + // peer2 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275bbbbbb") + + // run muxer + ctx, cancel := context.WithCancel(context.Background()) + mux1.Start(ctx) + + // counts + total := 10000 + speed := time.Microsecond * 1 + counts := [2][2][2]int{} + + // run producers at every end sending incrementing messages + produceOut := func(pid ProtocolID, size int) { + limiter := time.Tick(speed) + for i := 0; i < size; i++ { + <-limiter + s := fmt.Sprintf("proto %v out %v", pid, i) + m := 
msg.New(peer1, []byte(s)) + mux1.Protocols[pid].GetPipe().Outgoing <- m + counts[pid][0][0]++ + u.DOut("sent %v\n", s) + } + } + + produceIn := func(pid ProtocolID, size int) { + limiter := time.Tick(speed) + for i := 0; i < size; i++ { + <-limiter + s := fmt.Sprintf("proto %v in %v", pid, i) + d, err := wrapData([]byte(s), pid) + if err != nil { + t.Error(err) + } + + m := msg.New(peer1, d) + mux1.Incoming <- m + counts[pid][1][0]++ + u.DOut("sent %v\n", s) + } + } + + consumeOut := func() { + for { + select { + case m := <-mux1.Outgoing: + data, pid, err := unwrapData(m.Data()) + if err != nil { + t.Error(err) + } + + u.DOut("got %v\n", string(data)) + counts[pid][1][1]++ + + case <-ctx.Done(): + return + } + } + } + + consumeIn := func(pid ProtocolID) { + for { + select { + case m := <-mux1.Protocols[pid].GetPipe().Incoming: + counts[pid][0][1]++ + u.DOut("got %v\n", string(m.Data())) + case <-ctx.Done(): + return + } + } + } + + go produceOut(pid1, total) + go produceOut(pid2, total) + go produceIn(pid1, total) + go produceIn(pid2, total) + go consumeOut() + go consumeIn(pid1) + go consumeIn(pid2) + + limiter := time.Tick(speed) + for { + <-limiter + got := counts[0][0][0] + counts[0][0][1] + + counts[0][1][0] + counts[0][1][1] + + counts[1][0][0] + counts[1][0][1] + + counts[1][1][0] + counts[1][1][1] + + if got == total*8 { + cancel() + return + } + } + +} + +func TestStopping(t *testing.T) { + + // setup + p1 := &TestProtocol{Pipe: msg.NewPipe(10)} + p2 := &TestProtocol{Pipe: msg.NewPipe(10)} + pid1 := ProtocolID_Test + pid2 := ProtocolID_Identify + mux1 := &Muxer{ + Pipe: msg.NewPipe(10), + Protocols: ProtocolMap{ + pid1: p1, + pid2: p2, + }, + } + peer1 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275aaaaaa") + // peer2 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275bbbbbb") + + // run muxer + mux1.Start(context.Background()) + + // test outgoing p1 + for _, s := range []string{"foo", "bar", "baz"} { + p1.Outgoing <- msg.New(peer1, []byte(s)) + 
testWrappedMsg(t, <-mux1.Outgoing, pid1, []byte(s)) + } + + // test incoming p1 + for _, s := range []string{"foo", "bar", "baz"} { + d, err := wrapData([]byte(s), pid1) + if err != nil { + t.Error(err) + } + mux1.Incoming <- msg.New(peer1, d) + testMsg(t, <-p1.Incoming, []byte(s)) + } + + mux1.Stop() + if mux1.cancel != nil { + t.Error("mux.cancel should be nil") + } + + // test outgoing p1 + for _, s := range []string{"foo", "bar", "baz"} { + p1.Outgoing <- msg.New(peer1, []byte(s)) + select { + case <-mux1.Outgoing: + t.Error("should not have received anything.") + case <-time.After(time.Millisecond): + } + } + + // test incoming p1 + for _, s := range []string{"foo", "bar", "baz"} { + d, err := wrapData([]byte(s), pid1) + if err != nil { + t.Error(err) + } + mux1.Incoming <- msg.New(peer1, d) + select { + case <-p1.Incoming: + t.Error("should not have received anything.") + case <-time.After(time.Millisecond): + } + } + +} diff --git a/net/net.go b/net/net.go new file mode 100644 index 00000000000..e080ff97c04 --- /dev/null +++ b/net/net.go @@ -0,0 +1,106 @@ +package net + +import ( + "errors" + + msg "github.com/jbenet/go-ipfs/net/message" + mux "github.com/jbenet/go-ipfs/net/mux" + swarm "github.com/jbenet/go-ipfs/net/swarm" + peer "github.com/jbenet/go-ipfs/peer" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" +) + +// IpfsNetwork implements the Network interface, +type IpfsNetwork struct { + + // local peer + local *peer.Peer + + // protocol multiplexing + muxer *mux.Muxer + + // peer connection multiplexing + swarm *swarm.Swarm + + // network context + ctx context.Context + cancel context.CancelFunc +} + +// NewIpfsNetwork is the structure that implements the network interface +func NewIpfsNetwork(ctx context.Context, local *peer.Peer, + pmap *mux.ProtocolMap) (*IpfsNetwork, error) { + + ctx, cancel := context.WithCancel(ctx) + + in := &IpfsNetwork{ + local: local, + muxer: &mux.Muxer{Protocols: *pmap}, + ctx: 
ctx, + cancel: cancel, + } + + err := in.muxer.Start(ctx) + if err != nil { + cancel() + return nil, err + } + + in.swarm, err = swarm.NewSwarm(ctx, local) + if err != nil { + cancel() + return nil, err + } + + return in, nil +} + +// Listen handles incoming connections on given Multiaddr. +// func (n *IpfsNetwork) Listen(*ma.Muliaddr) error {} + +// DialPeer attempts to establish a connection to a given peer +func (n *IpfsNetwork) DialPeer(p *peer.Peer) error { + _, err := n.swarm.Dial(p) + return err +} + +// ClosePeer connection to peer +func (n *IpfsNetwork) ClosePeer(p *peer.Peer) error { + return n.swarm.CloseConnection(p) +} + +// IsConnected returns whether a connection to given peer exists. +func (n *IpfsNetwork) IsConnected(p *peer.Peer) (bool, error) { + return n.swarm.GetConnection(p.ID) != nil, nil +} + +// GetProtocols returns the protocols registered in the network. +func (n *IpfsNetwork) GetProtocols() *mux.ProtocolMap { + // copy over because this map should be read only. + pmap := mux.ProtocolMap{} + for id, proto := range n.muxer.Protocols { + pmap[id] = proto + } + return &pmap +} + +// SendMessage sends given Message out +func (n *IpfsNetwork) SendMessage(m msg.NetMessage) error { + n.swarm.Outgoing <- m + return nil +} + +// Close terminates all network operation +func (n *IpfsNetwork) Close() error { + if n.cancel == nil { + return errors.New("Network already closed.") + } + + n.swarm.Close() + n.muxer.Stop() + + n.cancel() + n.cancel = nil + return nil +} diff --git a/net/net_test.go b/net/net_test.go new file mode 100644 index 00000000000..9d9f1a11e22 --- /dev/null +++ b/net/net_test.go @@ -0,0 +1 @@ +package net diff --git a/net/service/Makefile b/net/service/Makefile new file mode 100644 index 00000000000..990c6ade7e1 --- /dev/null +++ b/net/service/Makefile @@ -0,0 +1,8 @@ + +all: request.pb.go + +request.pb.go: request.proto + protoc --gogo_out=. --proto_path=../../../../../:/usr/local/opt/protobuf/include:. 
$< + +clean: + rm request.pb.go diff --git a/net/service/request.go b/net/service/request.go new file mode 100644 index 00000000000..0905e3a635b --- /dev/null +++ b/net/service/request.go @@ -0,0 +1,127 @@ +package service + +import ( + crand "crypto/rand" + + msg "github.com/jbenet/go-ipfs/net/message" + peer "github.com/jbenet/go-ipfs/peer" + + proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" +) + +const ( + // IDSize is the size of the ID in bytes. + IDSize int = 4 +) + +// RequestID is a field that identifies request-response flows. +type RequestID []byte + +// Request turns a RequestID into a Request (unsetting first bit) +func (r RequestID) Request() RequestID { + if r == nil { + return nil + } + r2 := make([]byte, len(r)) + copy(r2, r) + r2[0] = r[0] & 0x7F // unset first bit for request + return RequestID(r2) +} + +// Response turns a RequestID into a Response (setting first bit) +func (r RequestID) Response() RequestID { + if r == nil { + return nil + } + r2 := make([]byte, len(r)) + copy(r2, r) + r2[0] = r[0] | 0x80 // set first bit for response + return RequestID(r2) +} + +// IsRequest returns whether a RequestID identifies a request +func (r RequestID) IsRequest() bool { + if r == nil { + return false + } + return !r.IsResponse() +} + +// IsResponse returns whether a RequestID identifies a response +func (r RequestID) IsResponse() bool { + if r == nil { + return false + } + return bool(r[0]&0x80 == 0x80) +} + +// RandomRequestID creates and returns a new random request ID +func RandomRequestID() (RequestID, error) { + buf := make([]byte, IDSize) + _, err := crand.Read(buf) + return RequestID(buf).Request(), err +} + +// RequestMap is a map of Requests. the key = (peer.ID concat RequestID). +type RequestMap map[string]*Request + +// Request objects are used to multiplex request-response flows. +type Request struct { + + // ID is the RequestID identifying this Request-Response Flow. 
+ ID RequestID + + // PeerID identifies the peer from whom to expect the response. + PeerID peer.ID + + // Response is the channel of incoming responses. + Response chan msg.NetMessage +} + +// NewRequest creates a request for given peer.ID +func NewRequest(pid peer.ID) (*Request, error) { + id, err := RandomRequestID() + if err != nil { + return nil, err + } + + return &Request{ + ID: id, + PeerID: pid, + Response: make(chan msg.NetMessage, 1), + }, nil +} + +// Key returns the RequestKey for this request. Use with maps. +func (r *Request) Key() string { + return RequestKey(r.PeerID, r.ID) +} + +// RequestKey is the peer.ID concatenated with the RequestID. Use with maps. +func RequestKey(pid peer.ID, rid RequestID) string { + return string(pid) + string(rid.Request()[:]) +} + +func wrapData(data []byte, rid RequestID) ([]byte, error) { + // Marshal + pbm := new(PBRequest) + pbm.Data = data + pbm.Tag = rid + b, err := proto.Marshal(pbm) + if err != nil { + return nil, err + } + + return b, nil +} + +func unwrapData(data []byte) ([]byte, RequestID, error) { + // Unmarshal + pbm := new(PBRequest) + err := proto.Unmarshal(data, pbm) + if err != nil { + return nil, nil, err + } + + return pbm.GetData(), pbm.GetTag(), nil +} diff --git a/net/service/request.pb.go b/net/service/request.pb.go new file mode 100644 index 00000000000..1766f61cb30 --- /dev/null +++ b/net/service/request.pb.go @@ -0,0 +1,50 @@ +// Code generated by protoc-gen-gogo. +// source: request.proto +// DO NOT EDIT! + +/* +Package service is a generated protocol buffer package. + +It is generated from these files: + request.proto + +It has these top-level messages: + PBRequest +*/ +package service + +import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" +import json "encoding/json" +import math "math" + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. 
+var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +type PBRequest struct { + Data []byte `protobuf:"bytes,1,req" json:"Data,omitempty"` + Tag []byte `protobuf:"bytes,3,opt" json:"Tag,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PBRequest) Reset() { *m = PBRequest{} } +func (m *PBRequest) String() string { return proto.CompactTextString(m) } +func (*PBRequest) ProtoMessage() {} + +func (m *PBRequest) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *PBRequest) GetTag() []byte { + if m != nil { + return m.Tag + } + return nil +} + +func init() { +} diff --git a/net/service/request.proto b/net/service/request.proto new file mode 100644 index 00000000000..695308f50b9 --- /dev/null +++ b/net/service/request.proto @@ -0,0 +1,6 @@ +package service; + +message PBRequest { + required bytes Data = 1; + optional bytes Tag = 3; +} diff --git a/net/service/request_test.go b/net/service/request_test.go new file mode 100644 index 00000000000..1931f8f6382 --- /dev/null +++ b/net/service/request_test.go @@ -0,0 +1,41 @@ +package service + +import ( + "bytes" + "testing" +) + +func TestMarshaling(t *testing.T) { + + test := func(d1 []byte, rid1 RequestID) { + d2, err := wrapData(d1, rid1) + if err != nil { + t.Error(err) + } + + d3, rid2, err := unwrapData(d2) + if err != nil { + t.Error(err) + } + + d4, err := wrapData(d3, rid1) + if err != nil { + t.Error(err) + } + + if !bytes.Equal(rid2, rid1) { + t.Error("RequestID fail") + } + + if !bytes.Equal(d1, d3) { + t.Error("unmarshalled data should be the same") + } + + if !bytes.Equal(d2, d4) { + t.Error("marshalled data should be the same") + } + } + + test([]byte("foo"), []byte{1, 2, 3, 4}) + test([]byte("bar"), nil) +} diff --git a/net/service/service.go b/net/service/service.go new file mode 100644 index 00000000000..caa2e2354e9 --- /dev/null +++ b/net/service/service.go @@ -0,0 +1,210 @@ +package service + +import ( + "errors" + "sync" + + msg 
"github.com/jbenet/go-ipfs/net/message" + u "github.com/jbenet/go-ipfs/util" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" +) + +// Handler is an interface that objects must implement in order to handle +// a service's requests. +type Handler interface { + + // HandleMessage receives an incoming message, and potentially returns + // a response message to send back. + HandleMessage(context.Context, msg.NetMessage) (msg.NetMessage, error) +} + +// Service is a networking component that protocols can use to multiplex +// messages over the same channel, and to issue + handle requests. +type Service struct { + // Handler is the object registered to handle incoming requests. + Handler Handler + + // Requests are all the pending requests on this service. + Requests RequestMap + RequestsLock sync.RWMutex + + // cancel is the function to stop the Service + cancel context.CancelFunc + + // Message Pipe (connected to the outside world) + *msg.Pipe +} + +// NewService creates a service object with given type ID and Handler +func NewService(h Handler) *Service { + return &Service{ + Handler: h, + Requests: RequestMap{}, + Pipe: msg.NewPipe(10), + } +} + +// Start kicks off the Service goroutines. +func (s *Service) Start(ctx context.Context) error { + if s.cancel != nil { + return errors.New("Service already started.") + } + + // make a cancellable context. + ctx, s.cancel = context.WithCancel(ctx) + + go s.handleIncomingMessages(ctx) + return nil +} + +// Stop stops Service activity. +func (s *Service) Stop() { + s.cancel() + s.cancel = context.CancelFunc(nil) +} + +// GetPipe implements the mux.Protocol interface +func (s *Service) GetPipe() *msg.Pipe { + return s.Pipe +} + +// sendMessage sends a message out (actual leg work. 
SendMessage is to export w/o rid) +func (s *Service) sendMessage(ctx context.Context, m msg.NetMessage, rid RequestID) error { + + // serialize ServiceMessage wrapper + data, err := wrapData(m.Data(), rid) + if err != nil { + return err + } + + // send message + m2 := msg.New(m.Peer(), data) + select { + case s.Outgoing <- m2: + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} + +// SendMessage sends a message out +func (s *Service) SendMessage(ctx context.Context, m msg.NetMessage) error { + return s.sendMessage(ctx, m, nil) +} + +// SendRequest sends a request message out and awaits a response. +func (s *Service) SendRequest(ctx context.Context, m msg.NetMessage) (msg.NetMessage, error) { + + // create a request + r, err := NewRequest(m.Peer().ID) + if err != nil { + return nil, err + } + + // register Request + s.RequestsLock.Lock() + s.Requests[r.Key()] = r + s.RequestsLock.Unlock() + + // defer deleting this request + defer func() { + s.RequestsLock.Lock() + delete(s.Requests, r.Key()) + s.RequestsLock.Unlock() + }() + + // check if we should bail after waiting for mutex + select { + default: + case <-ctx.Done(): + return nil, ctx.Err() + } + + // Send message + s.sendMessage(ctx, m, r.ID) + + // wait for response + m = nil + err = nil + select { + case m = <-r.Response: + case <-ctx.Done(): + err = ctx.Err() + } + + return m, err +} + +// handleIncoming consumes the messages on the s.Incoming channel and +// routes them appropriately (to requests, or handler). 
+func (s *Service) handleIncomingMessages(ctx context.Context) { + for { + select { + case m := <-s.Incoming: + go s.handleIncomingMessage(ctx, m) + + case <-ctx.Done(): + return + } + } +} + +func (s *Service) handleIncomingMessage(ctx context.Context, m msg.NetMessage) { + + // unwrap the incoming message + data, rid, err := unwrapData(m.Data()) + if err != nil { + u.PErr("de-serializing error: %v\n", err) + } + m2 := msg.New(m.Peer(), data) + + // if it's a request (or has no RequestID), handle it + if rid == nil || rid.IsRequest() { + if s.Handler == nil { + u.PErr("service dropped msg: %v\n", m) + return // no handler, drop it. + } + + // should this be "go HandleMessage ... ?" + r1, err := s.Handler.HandleMessage(ctx, m2) + if err != nil { + u.PErr("handled message yielded error %v\n", err) + return + } + + // if handler gave us a response, send it back out! + if r1 != nil { + err := s.sendMessage(ctx, r1, rid.Response()) + if err != nil { + u.PErr("error sending response message: %v\n", err) + } + } + return + } + + // Otherwise, it is a response. handle it. 
+ if !rid.IsResponse() { + u.PErr("RequestID should identify a response here.\n") + } + + key := RequestKey(m.Peer().ID, RequestID(rid)) + s.RequestsLock.RLock() + r, found := s.Requests[key] + s.RequestsLock.RUnlock() + + if !found { + u.PErr("no request key %v (timeout?)\n", []byte(key)) + return + } + + select { + case r.Response <- m2: + case <-ctx.Done(): + } +} + +func (s *Service) SetHandler(h Handler) { + s.Handler = h +} diff --git a/net/service/service_test.go b/net/service/service_test.go new file mode 100644 index 00000000000..6642117f30f --- /dev/null +++ b/net/service/service_test.go @@ -0,0 +1,146 @@ +package service + +import ( + "bytes" + "testing" + "time" + + msg "github.com/jbenet/go-ipfs/net/message" + peer "github.com/jbenet/go-ipfs/peer" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" +) + +// ReverseHandler reverses all Data it receives and sends it back. 
+type ReverseHandler struct{} + +func (t *ReverseHandler) HandleMessage(ctx context.Context, m msg.NetMessage) ( + msg.NetMessage, error) { + + d := m.Data() + for i, j := 0, len(d)-1; i < j; i, j = i+1, j-1 { + d[i], d[j] = d[j], d[i] + } + + return msg.New(m.Peer(), d), nil +} + +func newPeer(t *testing.T, id string) *peer.Peer { + mh, err := mh.FromHexString(id) + if err != nil { + t.Error(err) + return nil + } + + return &peer.Peer{ID: peer.ID(mh)} +} + +func TestServiceHandler(t *testing.T) { + ctx := context.Background() + h := &ReverseHandler{} + s := NewService(h) + peer1 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275aaaaaa") + + if err := s.Start(ctx); err != nil { + t.Error(err) + } + + d, err := wrapData([]byte("beep"), nil) + if err != nil { + t.Error(err) + } + + m1 := msg.New(peer1, d) + s.Incoming <- m1 + m2 := <-s.Outgoing + + d, rid, err := unwrapData(m2.Data()) + if err != nil { + t.Error(err) + } + + if rid != nil { + t.Error("RequestID should be nil") + } + + if !bytes.Equal(d, []byte("peeb")) { + t.Errorf("service handler data incorrect: %v != %v", d, "oof") + } +} + +func TestServiceRequest(t *testing.T) { + ctx := context.Background() + s1 := NewService(&ReverseHandler{}) + s2 := NewService(&ReverseHandler{}) + + if err := s1.Start(ctx); err != nil { + t.Error(err) + } + + if err := s2.Start(ctx); err != nil { + t.Error(err) + } + + peer1 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275aaaaaa") + + // patch services together + go func() { + for { + select { + case m := <-s1.Outgoing: + s2.Incoming <- m + case m := <-s2.Outgoing: + s1.Incoming <- m + case <-ctx.Done(): + return + } + } + }() + + m1 := msg.New(peer1, []byte("beep")) + m2, err := s1.SendRequest(ctx, m1) + if err != nil { + t.Error(err) + } + + if !bytes.Equal(m2.Data(), []byte("peeb")) { + t.Errorf("service handler data incorrect: %v != %v", m2.Data(), "oof") + } +} + +func TestServiceRequestTimeout(t *testing.T) { + ctx, _ := 
context.WithTimeout(context.Background(), time.Millisecond) + s1 := NewService(&ReverseHandler{}) + s2 := NewService(&ReverseHandler{}) + peer1 := newPeer(t, "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275aaaaaa") + + if err := s1.Start(ctx); err != nil { + t.Error(err) + } + + if err := s2.Start(ctx); err != nil { + t.Error(err) + } + + // patch services together + go func() { + for { + <-time.After(time.Millisecond) + select { + case m := <-s1.Outgoing: + s2.Incoming <- m + case m := <-s2.Outgoing: + s1.Incoming <- m + case <-ctx.Done(): + return + } + } + }() + + m1 := msg.New(peer1, []byte("beep")) + m2, err := s1.SendRequest(ctx, m1) + if err == nil || m2 != nil { + t.Error("should've timed out") + } +} diff --git a/net/swarm/conn.go b/net/swarm/conn.go new file mode 100644 index 00000000000..93bee663d78 --- /dev/null +++ b/net/swarm/conn.go @@ -0,0 +1,201 @@ +package swarm + +import ( + "errors" + "fmt" + "net" + + spipe "github.com/jbenet/go-ipfs/crypto/spipe" + conn "github.com/jbenet/go-ipfs/net/conn" + msg "github.com/jbenet/go-ipfs/net/message" + u "github.com/jbenet/go-ipfs/util" + + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" +) + +// Open listeners for each network the swarm should listen on +func (s *Swarm) listen() error { + hasErr := false + retErr := &ListenErr{ + Errors: make([]error, len(s.local.Addresses)), + } + + // listen on every address + for i, addr := range s.local.Addresses { + err := s.connListen(addr) + if err != nil { + hasErr = true + retErr.Errors[i] = err + u.PErr("Failed to listen on: %s [%s]", addr, err) + } + } + + if hasErr { + return retErr + } + return nil +} + +// Listen for new connections on the given multiaddr +func (s *Swarm) connListen(maddr *ma.Multiaddr) error { + netstr, addr, err := maddr.DialArgs() + if err != nil { + return err + } + + list, err := net.Listen(netstr, addr) + if err != nil { + return err + } + + // NOTE: this may require a lock around it later. 
currently, only run on setup + s.listeners = append(s.listeners, list) + + // Accept and handle new connections on this listener until it errors + go func() { + for { + nconn, err := list.Accept() + if err != nil { + e := fmt.Errorf("Failed to accept connection: %s - %s [%s]", + netstr, addr, err) + s.errChan <- e + + // if cancel is nil, we're closed. + if s.cancel == nil { + return + } + } else { + go s.handleIncomingConn(nconn) + } + } + }() + + return nil +} + +// Handle getting ID from this peer, handshake, and adding it into the map +func (s *Swarm) handleIncomingConn(nconn net.Conn) { + + c, err := conn.NewNetConn(nconn) + if err != nil { + s.errChan <- err + return + } + + //TODO(jbenet) the peer might potentially already be in the global PeerBook. + // maybe use the handshake to populate peer. + c.Peer.AddAddress(c.Addr) + + // Setup the new connection + err = s.connSetup(c) + if err != nil && err != ErrAlreadyOpen { + s.errChan <- err + c.Close() + } +} + +// connSetup adds the passed in connection to its peerMap and starts +// the fanIn routine for that connection +func (s *Swarm) connSetup(c *conn.Conn) error { + if c == nil { + return errors.New("Tried to start nil connection.") + } + + u.DOut("Starting connection: %s\n", c.Peer.Key().Pretty()) + + if err := s.connSecure(c); err != nil { + return fmt.Errorf("Conn securing error: %v", err) + } + + // add to conns + s.connsLock.Lock() + if _, ok := s.conns[c.Peer.Key()]; ok { + s.connsLock.Unlock() + return ErrAlreadyOpen + } + s.conns[c.Peer.Key()] = c + s.connsLock.Unlock() + + // kick off reader goroutine + go s.fanIn(c) + return nil +} + +// connSecure setups a secure remote connection. 
+func (s *Swarm) connSecure(c *conn.Conn) error { + + sp, err := spipe.NewSecurePipe(s.ctx, 10, s.local, c.Peer) + if err != nil { + return err + } + + err = sp.Wrap(s.ctx, spipe.Duplex{ + In: c.Incoming.MsgChan, + Out: c.Outgoing.MsgChan, + }) + if err != nil { + return err + } + + return nil +} + +// Handles the unwrapping + sending of messages to the right connection. +func (s *Swarm) fanOut() { + for { + select { + case <-s.ctx.Done(): + return // told to close. + + case msg, ok := <-s.Outgoing: + if !ok { + return + } + + s.connsLock.RLock() + conn, found := s.conns[msg.Peer().Key()] + s.connsLock.RUnlock() + + if !found { + e := fmt.Errorf("Sent msg to peer without open conn: %v", + msg.Peer) + s.errChan <- e + continue + } + + // queue it in the connection's buffer + conn.Outgoing.MsgChan <- msg.Data() + } + } +} + +// Handles the receiving + wrapping of messages, per conn. +// Consider using reflect.Select with one goroutine instead of n. +func (s *Swarm) fanIn(c *conn.Conn) { + for { + select { + case <-s.ctx.Done(): + // close Conn. 
+ c.Close() + goto out + + case <-c.Closed: + goto out + + case data, ok := <-c.Incoming.MsgChan: + if !ok { + e := fmt.Errorf("Error retrieving from conn: %v", c.Peer.Key().Pretty()) + s.errChan <- e + goto out + } + + msg := msg.New(c.Peer, data) + s.Incoming <- msg + } + } + +out: + s.connsLock.Lock() + delete(s.conns, c.Peer.Key()) + s.connsLock.Unlock() +} diff --git a/net/swarm/swarm.go b/net/swarm/swarm.go new file mode 100644 index 00000000000..7ef4ce234c1 --- /dev/null +++ b/net/swarm/swarm.go @@ -0,0 +1,190 @@ +package swarm + +import ( + "errors" + "fmt" + "net" + "sync" + + conn "github.com/jbenet/go-ipfs/net/conn" + msg "github.com/jbenet/go-ipfs/net/message" + peer "github.com/jbenet/go-ipfs/peer" + u "github.com/jbenet/go-ipfs/util" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" +) + +// ErrAlreadyOpen signals that a connection to a peer is already open. +var ErrAlreadyOpen = errors.New("Error: Connection to this peer already open.") + +// ListenErr contains a set of errors mapping to each of the swarms addresses. +// Used to return multiple errors, as in listen. +type ListenErr struct { + Errors []error +} + +func (e *ListenErr) Error() string { + if e == nil { + return "" + } + var out string + for i, v := range e.Errors { + if v != nil { + out += fmt.Sprintf("%d: %s\n", i, v) + } + } + return out +} + +// Swarm is a connection muxer, allowing connections to other peers to +// be opened and closed, while still using the same Chan for all +// communication. The Chan sends/receives Messages, which note the +// destination or source Peer. +type Swarm struct { + + // local is the peer this swarm represents + local *peer.Peer + + // Swarm includes a Pipe object. + *msg.Pipe + + // errChan is the channel of errors. + errChan chan error + + // conns are the open connections the swarm is handling. 
+ conns conn.Map + connsLock sync.RWMutex + + // listeners for each network address + listeners []net.Listener + + // cancel is an internal function used to stop the Swarm's processing. + cancel context.CancelFunc + ctx context.Context +} + +// NewSwarm constructs a Swarm, with a Chan. +func NewSwarm(ctx context.Context, local *peer.Peer) (*Swarm, error) { + s := &Swarm{ + Pipe: msg.NewPipe(10), + conns: conn.Map{}, + local: local, + errChan: make(chan error, 100), + } + + s.ctx, s.cancel = context.WithCancel(ctx) + go s.fanOut() + return s, s.listen() +} + +// Close stops a swarm. +func (s *Swarm) Close() error { + if s.cancel == nil { + return errors.New("Swarm already closed.") + } + + // issue cancel for the context + s.cancel() + + // set cancel to nil to prevent calling Close again, and signal to Listeners + s.cancel = nil + + // close listeners + for _, list := range s.listeners { + list.Close() + } + return nil +} + +// Dial connects to a peer. +// +// The idea is that the client of Swarm does not need to know what network +// the connection will happen over. Swarm can use whichever it choses. +// This allows us to use various transport protocols, do NAT traversal/relay, +// etc. to achive connection. +// +// For now, Dial uses only TCP. This will be extended. +func (s *Swarm) Dial(peer *peer.Peer) (*conn.Conn, error) { + if peer.ID.Equal(s.local.ID) { + return nil, errors.New("Attempted connection to self!") + } + + // check if we already have an open connection first + c := s.GetConnection(peer.ID) + + // open connection to peer + c, err := conn.Dial("tcp", peer) + if err != nil { + return nil, err + } + + if err := s.connSetup(c); err != nil { + c.Close() + return nil, err + } + + return c, nil +} + +// DialAddr is for connecting to a peer when you know their addr but not their ID. +// Should only be used when sure that not connected to peer in question +// TODO(jbenet) merge with Dial? need way to patch back. 
+func (s *Swarm) DialAddr(addr *ma.Multiaddr) (*conn.Conn, error) { + if addr == nil { + return nil, errors.New("addr must be a non-nil Multiaddr") + } + + npeer := new(peer.Peer) + npeer.AddAddress(addr) + + c, err := conn.Dial("tcp", npeer) + if err != nil { + return nil, err + } + + if err := s.connSetup(c); err != nil { + c.Close() + return nil, err + } + + return c, err +} + +// GetConnection returns the connection in the swarm to given peer.ID +func (s *Swarm) GetConnection(pid peer.ID) *conn.Conn { + s.connsLock.RLock() + c, found := s.conns[u.Key(pid)] + s.connsLock.RUnlock() + + if !found { + return nil + } + return c +} + +// CloseConnection removes a given peer from swarm + closes the connection +func (s *Swarm) CloseConnection(p *peer.Peer) error { + c := s.GetConnection(p.ID) + if c == nil { + return u.ErrNotFound + } + + s.connsLock.Lock() + delete(s.conns, u.Key(p.ID)) + s.connsLock.Unlock() + + return c.Close() +} + +func (s *Swarm) Error(e error) { + s.errChan <- e +} + +// GetErrChan returns the errors chan. 
+func (s *Swarm) GetErrChan() chan error { + return s.errChan +} + +// Temporary to ensure that the Swarm always matches the Network interface as we are changing it +// var _ Network = &Swarm{} diff --git a/swarm/swarm_test.go b/net/swarm/swarm_test.go similarity index 60% rename from swarm/swarm_test.go rename to net/swarm/swarm_test.go index e8a7af50d6f..246c812cef4 100644 --- a/swarm/swarm_test.go +++ b/net/swarm/swarm_test.go @@ -5,22 +5,26 @@ import ( "net" "testing" - msgio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio" + msg "github.com/jbenet/go-ipfs/net/message" peer "github.com/jbenet/go-ipfs/peer" u "github.com/jbenet/go-ipfs/util" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + msgio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" + mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" ) -func pingListen(listener *net.TCPListener, peer *peer.Peer) { +func pingListen(t *testing.T, listener *net.TCPListener, peer *peer.Peer) { for { c, err := listener.Accept() if err == nil { - fmt.Println("accepted") - go pong(c, peer) + go pong(t, c, peer) } } } -func pong(c net.Conn, peer *peer.Peer) { +func pong(t *testing.T, c net.Conn, peer *peer.Peer) { mrw := msgio.NewReadWriter(c) for { data := make([]byte, 1024) @@ -29,22 +33,12 @@ func pong(c net.Conn, peer *peer.Peer) { fmt.Printf("error %v\n", err) return } - b, err := Unwrap(data[:n]) - if err != nil { - fmt.Printf("error %v\n", err) - return - } - if string(b.GetMessage()) != "ping" { - fmt.Printf("error: didn't receive ping: '%v'\n", b.GetMessage()) - return - } - - data, err = Wrap([]byte("pong"), PBWrapper_TEST) - if err != nil { - fmt.Printf("error %v\n", err) + d := string(data[:n]) + if d != "ping" { + t.Errorf("error: didn't receive ping: '%v'\n", d) return } - err = 
mrw.WriteMsg(data) + err = mrw.WriteMsg([]byte("pong")) if err != nil { fmt.Printf("error %v\n", err) return @@ -52,19 +46,43 @@ func pong(c net.Conn, peer *peer.Peer) { } } +func setupPeer(id string, addr string) (*peer.Peer, error) { + tcp, err := ma.NewMultiaddr(addr) + if err != nil { + return nil, err + } + + mh, err := mh.FromHexString(id) + if err != nil { + return nil, err + } + + p := &peer.Peer{ID: peer.ID(mh)} + p.AddAddress(tcp) + return p, nil +} + func TestSwarm(t *testing.T) { - swarm := NewSwarm(nil) + local, err := setupPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a30", + "/ip4/127.0.0.1/tcp/1234") + if err != nil { + t.Fatal("error setting up peer", err) + } + + swarm, err := NewSwarm(context.Background(), local) + if err != nil { + t.Error(err) + } var peers []*peer.Peer var listeners []net.Listener peerNames := map[string]string{ - "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a30": "/ip4/127.0.0.1/tcp/1234", "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31": "/ip4/127.0.0.1/tcp/2345", "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32": "/ip4/127.0.0.1/tcp/3456", "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33": "/ip4/127.0.0.1/tcp/4567", + "11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a34": "/ip4/127.0.0.1/tcp/5678", } - recv := swarm.GetChannel(PBWrapper_TEST) for k, n := range peerNames { peer, err := setupPeer(k, n) if err != nil { @@ -82,18 +100,13 @@ func TestSwarm(t *testing.T) { if err != nil { t.Fatal("error setting up listener", err) } - go pingListen(listener.(*net.TCPListener), peer) + go pingListen(t, listener.(*net.TCPListener), peer) - conn, err, _ := swarm.Dial(peer) + _, err = swarm.Dial(peer) if err != nil { t.Fatal("error swarm dialing to peer", err) } - //Since we arent doing a handshake, set up 'secure' channels - conn.secIn = conn.Incoming.MsgChan - conn.secOut = conn.Outgoing.MsgChan - - swarm.StartConn(conn) // ok done, add it. 
peers = append(peers, peer) listeners = append(listeners, listener) @@ -102,14 +115,14 @@ func TestSwarm(t *testing.T) { MsgNum := 1000 for k := 0; k < MsgNum; k++ { for _, p := range peers { - recv.Outgoing <- &Message{Peer: p, Data: []byte("ping")} + swarm.Outgoing <- &msg.Message{Peer: p, Data: []byte("ping")} } } got := map[u.Key]int{} for k := 0; k < (MsgNum * len(peers)); k++ { - msg := <-recv.Incoming + msg := <-swarm.Incoming if string(msg.Data) != "pong" { t.Error("unexpected conn output", msg.Data) } @@ -128,7 +141,6 @@ func TestSwarm(t *testing.T) { } } - fmt.Println("closing") swarm.Close() for _, listener := range listeners { listener.(*net.TCPListener).Close() diff --git a/peer/peer.go b/peer/peer.go index 870170c4baf..fb9dead0a10 100644 --- a/peer/peer.go +++ b/peer/peer.go @@ -21,6 +21,7 @@ func (id ID) Equal(other ID) bool { return bytes.Equal(id, other) } +// Pretty returns a b58-encoded string of the ID func (id ID) Pretty() string { return b58.Encode(id) } @@ -37,8 +38,9 @@ type Peer struct { PrivKey ic.PrivKey PubKey ic.PubKey - latency time.Duration - latenLock sync.RWMutex + latency time.Duration + + sync.RWMutex } // Key returns the ID as a Key (string) for maps. @@ -48,11 +50,22 @@ func (p *Peer) Key() u.Key { // AddAddress adds the given Multiaddr address to Peer's addresses. func (p *Peer) AddAddress(a *ma.Multiaddr) { + p.Lock() + defer p.Unlock() + + for _, addr := range p.Addresses { + if addr.Equal(a) { + return + } + } p.Addresses = append(p.Addresses, a) } // NetAddress returns the first Multiaddr found for a given network. func (p *Peer) NetAddress(n string) *ma.Multiaddr { + p.RLock() + defer p.RUnlock() + for _, a := range p.Addresses { ps, err := a.Protocols() if err != nil { @@ -68,17 +81,20 @@ func (p *Peer) NetAddress(n string) *ma.Multiaddr { return nil } +// GetLatency retrieves the current latency measurement. 
func (p *Peer) GetLatency() (out time.Duration) { - p.latenLock.RLock() + p.RLock() out = p.latency - p.latenLock.RUnlock() + p.RUnlock() return } +// SetLatency sets the latency measurement. // TODO: Instead of just keeping a single number, // keep a running average over the last hour or so +// Yep, should be EWMA or something. (-jbenet) func (p *Peer) SetLatency(laten time.Duration) { - p.latenLock.Lock() + p.Lock() p.latency = laten - p.latenLock.Unlock() + p.Unlock() } diff --git a/peer/peer_test.go b/peer/peer_test.go index e254c403d65..a873d6a6f4b 100644 --- a/peer/peer_test.go +++ b/peer/peer_test.go @@ -1,9 +1,10 @@ package peer import ( + "testing" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash" - "testing" ) func TestNetAddress(t *testing.T) { @@ -29,6 +30,11 @@ func TestNetAddress(t *testing.T) { p := Peer{ID: ID(mh)} p.AddAddress(tcp) p.AddAddress(udp) + p.AddAddress(tcp) + + if len(p.Addresses) == 3 { + t.Error("added same address twice") + } tcp2 := p.NetAddress("tcp") if tcp2 != tcp { diff --git a/peer/peerstore.go b/peer/peerstore.go new file mode 100644 index 00000000000..2184d89425e --- /dev/null +++ b/peer/peerstore.go @@ -0,0 +1,87 @@ +package peer + +import ( + "errors" + "sync" + + u "github.com/jbenet/go-ipfs/util" + + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" +) + +// Peerstore provides a threadsafe collection for peers. +type Peerstore interface { + Get(ID) (*Peer, error) + Put(*Peer) error + Delete(ID) error + All() (*Map, error) +} + +type peerstore struct { + sync.RWMutex + peers ds.Datastore +} + +// NewPeerstore creates a threadsafe collection of peers. 
+func NewPeerstore() Peerstore { + return &peerstore{ + peers: ds.NewMapDatastore(), + } +} + +func (p *peerstore) Get(i ID) (*Peer, error) { + p.RLock() + defer p.RUnlock() + + k := ds.NewKey(string(i)) + val, err := p.peers.Get(k) + if err != nil { + return nil, err + } + + peer, ok := val.(*Peer) + if !ok { + return nil, errors.New("stored value was not a Peer") + } + return peer, nil +} + +func (p *peerstore) Put(peer *Peer) error { + p.Lock() + defer p.Unlock() + + k := ds.NewKey(string(peer.ID)) + return p.peers.Put(k, peer) +} + +func (p *peerstore) Delete(i ID) error { + p.Lock() + defer p.Unlock() + + k := ds.NewKey(string(i)) + return p.peers.Delete(k) +} + +func (p *peerstore) All() (*Map, error) { + p.RLock() + defer p.RUnlock() + + l, err := p.peers.KeyList() + if err != nil { + return nil, err + } + + ps := &Map{} + for _, k := range l { + val, err := p.peers.Get(k) + if err != nil { + continue + } + + pval, ok := val.(*Peer) + if ok { + (*ps)[u.Key(k.String())] = pval + } + } + return ps, nil +} diff --git a/peer/peerstore_test.go b/peer/peerstore_test.go new file mode 100644 index 00000000000..18d977ff973 --- /dev/null +++ b/peer/peerstore_test.go @@ -0,0 +1,82 @@ +package peer + +import ( + "errors" + "testing" + + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" +) + +func setupPeer(id string, addr string) (*Peer, error) { + tcp, err := ma.NewMultiaddr(addr) + if err != nil { + return nil, err + } + + p := &Peer{ID: ID(id)} + p.AddAddress(tcp) + return p, nil +} + +func TestPeerstore(t *testing.T) { + + ps := NewPeerstore() + + p11, _ := setupPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31", "/ip4/127.0.0.1/tcp/1234") + p21, _ := setupPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32", "/ip4/127.0.0.1/tcp/2345") + // p31, _ := setupPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33", "/ip4/127.0.0.1/tcp/3456") + // p41, _ := setupPeer("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a34", "/ip4/127.0.0.1/tcp/4567") 
+ + err := ps.Put(p11) + if err != nil { + t.Error(err) + } + + p12, err := ps.Get(ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31")) + if err != nil { + t.Error(err) + } + + if p11 != p12 { + t.Error(errors.New("peers should be the same")) + } + + err = ps.Put(p21) + if err != nil { + t.Error(err) + } + + p22, err := ps.Get(ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32")) + if err != nil { + t.Error(err) + } + + if p21 != p22 { + t.Error(errors.New("peers should be the same")) + } + + _, err = ps.Get(ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")) + if err == nil { + t.Error(errors.New("should've been an error here")) + } + + err = ps.Delete(ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31")) + if err != nil { + t.Error(err) + } + + _, err = ps.Get(ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31")) + if err == nil { + t.Error(errors.New("should've been an error here")) + } + + p22, err = ps.Get(ID("11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32")) + if err != nil { + t.Error(err) + } + + if p21 != p22 { + t.Error(errors.New("peers should be the same")) + } + +} diff --git a/routing/dht/Makefile b/routing/dht/Makefile new file mode 100644 index 00000000000..563234b1d3c --- /dev/null +++ b/routing/dht/Makefile @@ -0,0 +1,11 @@ + +PB = $(wildcard *.proto) +GO = $(PB:.proto=.pb.go) + +all: $(GO) + +%.pb.go: %.proto + protoc --gogo_out=. --proto_path=../../../../:/usr/local/opt/protobuf/include:. 
$< + +clean: + rm *.pb.go diff --git a/routing/dht/Message.go b/routing/dht/Message.go index 21bd26a85d2..d82b3bb442f 100644 --- a/routing/dht/Message.go +++ b/routing/dht/Message.go @@ -3,21 +3,20 @@ package dht import ( "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" peer "github.com/jbenet/go-ipfs/peer" + u "github.com/jbenet/go-ipfs/util" ) -// Message is a a helper struct which makes working with protbuf types easier -type Message struct { - Type PBDHTMessage_MessageType - Key string - Value []byte - Response bool - ID string - Success bool - Peers []*peer.Peer +func newMessage(typ Message_MessageType, key string, level int) *Message { + m := &Message{ + Type: &typ, + Key: &key, + } + m.SetClusterLevel(level) + return m } -func peerInfo(p *peer.Peer) *PBDHTMessage_PBPeer { - pbp := new(PBDHTMessage_PBPeer) +func peerToPBPeer(p *peer.Peer) *Message_Peer { + pbp := new(Message_Peer) if len(p.Addresses) == 0 || p.Addresses[0] == nil { pbp.Addr = proto.String("") } else { @@ -33,23 +32,30 @@ func peerInfo(p *peer.Peer) *PBDHTMessage_PBPeer { return pbp } -// ToProtobuf takes a Message and produces a protobuf with it. -// TODO: building the protobuf message this way is a little wasteful -// Unused fields wont be omitted, find a better way to do this -func (m *Message) ToProtobuf() *PBDHTMessage { - pmes := new(PBDHTMessage) - if m.Value != nil { - pmes.Value = m.Value +func peersToPBPeers(peers []*peer.Peer) []*Message_Peer { + pbpeers := make([]*Message_Peer, len(peers)) + for i, p := range peers { + pbpeers[i] = peerToPBPeer(p) } + return pbpeers +} - pmes.Type = &m.Type - pmes.Key = &m.Key - pmes.Response = &m.Response - pmes.Id = &m.ID - pmes.Success = &m.Success - for _, p := range m.Peers { - pmes.Peers = append(pmes.Peers, peerInfo(p)) +// GetClusterLevel gets and adjusts the cluster level on the message. 
+// a +/- 1 adjustment is needed to distinguish a valid first level (1) and +// default "no value" protobuf behavior (0) +func (m *Message) GetClusterLevel() int { + level := m.GetClusterLevelRaw() - 1 + if level < 0 { + u.PErr("handleGetValue: no routing level specified, assuming 0\n") + level = 0 } + return int(level) +} - return pmes +// SetClusterLevel adjusts and sets the cluster level on the message. +// a +/- 1 adjustment is needed to distinguish a valid first level (1) and +// default "no value" protobuf behavior (0) +func (m *Message) SetClusterLevel(level int) { + lvl := int32(level) + m.ClusterLevelRaw = &lvl } diff --git a/routing/dht/dht.go b/routing/dht/dht.go index 83962e2108e..37205374f77 100644 --- a/routing/dht/dht.go +++ b/routing/dht/dht.go @@ -1,20 +1,21 @@ package dht import ( - "bytes" "crypto/rand" + "errors" "fmt" "sync" "time" + inet "github.com/jbenet/go-ipfs/net" + msg "github.com/jbenet/go-ipfs/net/message" peer "github.com/jbenet/go-ipfs/peer" kb "github.com/jbenet/go-ipfs/routing/kbucket" - swarm "github.com/jbenet/go-ipfs/swarm" u "github.com/jbenet/go-ipfs/util" - ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" - + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" + ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" ) @@ -28,12 +29,16 @@ type IpfsDHT struct { // NOTE: (currently, only a single table is used) routingTables []*kb.RoutingTable - network swarm.Network - netChan *swarm.Chan + // the network interface. 
service + network inet.Network + sender inet.Sender // Local peer (yourself) self *peer.Peer + // Other peers + peerstore peer.Peerstore + // Local data datastore ds.Datastore dslock sync.Mutex @@ -48,18 +53,17 @@ type IpfsDHT struct { //lock to make diagnostics work better diaglock sync.Mutex - - // listener is a server to register to listen for responses to messages - listener *swarm.MessageListener } // NewDHT creates a new DHT object with the given peer as the 'local' host -func NewDHT(p *peer.Peer, net swarm.Network, dstore ds.Datastore) *IpfsDHT { +func NewDHT(p *peer.Peer, ps peer.Peerstore, net inet.Network, sender inet.Sender, dstore ds.Datastore) *IpfsDHT { dht := new(IpfsDHT) dht.network = net - dht.netChan = net.GetChannel(swarm.PBWrapper_DHT_MESSAGE) + dht.sender = sender dht.datastore = dstore dht.self = p + dht.peerstore = ps + dht.providers = NewProviderManager(p.ID) dht.shutdown = make(chan struct{}) @@ -67,21 +71,32 @@ func NewDHT(p *peer.Peer, net swarm.Network, dstore ds.Datastore) *IpfsDHT { dht.routingTables[0] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID), time.Millisecond*30) dht.routingTables[1] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID), time.Millisecond*100) dht.routingTables[2] = kb.NewRoutingTable(20, kb.ConvertPeerID(p.ID), time.Hour) - dht.listener = swarm.NewMessageListener() dht.birth = time.Now() return dht } // Start up background goroutines needed by the DHT func (dht *IpfsDHT) Start() { - go dht.handleMessages() + panic("the service is already started. rmv this method") } // Connect to a new peer at the given address, ping and add to the routing table func (dht *IpfsDHT) Connect(addr *ma.Multiaddr) (*peer.Peer, error) { maddrstr, _ := addr.String() u.DOut("Connect to new peer: %s\n", maddrstr) - npeer, err := dht.network.ConnectNew(addr) + + // TODO(jbenet,whyrusleeping) + // + // Connect should take in a Peer (with ID). 
In a sense, we shouldn't be + // allowing connections to random multiaddrs without knowing who we're + // speaking to (i.e. peer.ID). In terms of moving around simple addresses + // -- instead of an (ID, Addr) pair -- we can use: + // + // /ip4/10.20.30.40/tcp/1234/ipfs/Qxhxxchxzcncxnzcnxzcxzm + // + npeer := &peer.Peer{} + npeer.AddAddress(addr) + err := dht.network.DialPeer(npeer) if err != nil { return nil, err } @@ -98,406 +113,167 @@ func (dht *IpfsDHT) Connect(addr *ma.Multiaddr) (*peer.Peer, error) { return npeer, nil } -// Read in all messages from swarm and handle them appropriately -// NOTE: this function is just a quick sketch -func (dht *IpfsDHT) handleMessages() { - u.DOut("Begin message handling routine\n") - - errs := dht.network.GetErrChan() - for { - select { - case mes, ok := <-dht.netChan.Incoming: - if !ok { - u.DOut("handleMessages closing, bad recv on incoming\n") - return - } - pmes := new(PBDHTMessage) - err := proto.Unmarshal(mes.Data, pmes) - if err != nil { - u.PErr("Failed to decode protobuf message: %s\n", err) - continue - } - - dht.Update(mes.Peer) - - // Note: not sure if this is the correct place for this - if pmes.GetResponse() { - dht.listener.Respond(pmes.GetId(), mes) - continue - } - // - - u.DOut("[peer: %s]\nGot message type: '%s' [id = %x, from = %s]\n", - dht.self.ID.Pretty(), - PBDHTMessage_MessageType_name[int32(pmes.GetType())], - pmes.GetId(), mes.Peer.ID.Pretty()) - switch pmes.GetType() { - case PBDHTMessage_GET_VALUE: - go dht.handleGetValue(mes.Peer, pmes) - case PBDHTMessage_PUT_VALUE: - go dht.handlePutValue(mes.Peer, pmes) - case PBDHTMessage_FIND_NODE: - go dht.handleFindPeer(mes.Peer, pmes) - case PBDHTMessage_ADD_PROVIDER: - go dht.handleAddProvider(mes.Peer, pmes) - case PBDHTMessage_GET_PROVIDERS: - go dht.handleGetProviders(mes.Peer, pmes) - case PBDHTMessage_PING: - go dht.handlePing(mes.Peer, pmes) - case PBDHTMessage_DIAGNOSTIC: - go dht.handleDiagnostic(mes.Peer, pmes) - default: - u.PErr("Recieved 
invalid message type") - } - - case err := <-errs: - u.PErr("dht err: %s\n", err) - case <-dht.shutdown: - return - } - } -} +// HandleMessage implements the inet.Handler interface. +func (dht *IpfsDHT) HandleMessage(ctx context.Context, mes msg.NetMessage) (msg.NetMessage, error) { -func (dht *IpfsDHT) putValueToNetwork(p *peer.Peer, key string, value []byte) error { - pmes := Message{ - Type: PBDHTMessage_PUT_VALUE, - Key: key, - Value: value, - ID: swarm.GenerateMessageID(), + mData := mes.Data() + if mData == nil { + return nil, errors.New("message did not include Data") } - mes := swarm.NewMessage(p, pmes.ToProtobuf()) - dht.netChan.Outgoing <- mes - return nil -} - -func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *PBDHTMessage) { - u.DOut("handleGetValue for key: %s\n", pmes.GetKey()) - dskey := ds.NewKey(pmes.GetKey()) - resp := &Message{ - Response: true, - ID: pmes.GetId(), - Key: pmes.GetKey(), - } - iVal, err := dht.datastore.Get(dskey) - if err == nil { - u.DOut("handleGetValue success!\n") - resp.Success = true - resp.Value = iVal.([]byte) - } else if err == ds.ErrNotFound { - // Check if we know any providers for the requested value - provs := dht.providers.GetProviders(u.Key(pmes.GetKey())) - if len(provs) > 0 { - u.DOut("handleGetValue returning %d provider[s]\n", len(provs)) - resp.Peers = provs - resp.Success = true - } else { - // No providers? - // Find closest peer on given cluster to desired key and reply with that info - - level := 0 - if len(pmes.GetValue()) < 1 { - // TODO: maybe return an error? Defaulting isnt a good idea IMO - u.PErr("handleGetValue: no routing level specified, assuming 0\n") - } else { - level = int(pmes.GetValue()[0]) // Using value field to specify cluster level - } - u.DOut("handleGetValue searching level %d clusters\n", level) - - closer := dht.routingTables[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey()))) - - if closer.ID.Equal(dht.self.ID) { - u.DOut("Attempted to return self! 
this shouldnt happen...\n") - resp.Peers = nil - goto out - } - // If this peer is closer than the one from the table, return nil - if kb.Closer(dht.self.ID, closer.ID, u.Key(pmes.GetKey())) { - resp.Peers = nil - u.DOut("handleGetValue could not find a closer node than myself.\n") - } else { - u.DOut("handleGetValue returning a closer peer: '%s'\n", closer.ID.Pretty()) - resp.Peers = []*peer.Peer{closer} - } - } - } else { - //temp: what other errors can a datastore return? - panic(err) + mPeer := mes.Peer() + if mPeer == nil { + return nil, errors.New("message did not include a Peer") } -out: - mes := swarm.NewMessage(p, resp.ToProtobuf()) - dht.netChan.Outgoing <- mes -} - -// Store a value in this peer local storage -func (dht *IpfsDHT) handlePutValue(p *peer.Peer, pmes *PBDHTMessage) { - dht.dslock.Lock() - defer dht.dslock.Unlock() - dskey := ds.NewKey(pmes.GetKey()) - err := dht.datastore.Put(dskey, pmes.GetValue()) + // deserialize msg + pmes := new(Message) + err := proto.Unmarshal(mData, pmes) if err != nil { - // For now, just panic, handle this better later maybe - panic(err) + return nil, fmt.Errorf("Failed to decode protobuf message: %v\n", err) } -} -func (dht *IpfsDHT) handlePing(p *peer.Peer, pmes *PBDHTMessage) { - u.DOut("[%s] Responding to ping from [%s]!\n", dht.self.ID.Pretty(), p.ID.Pretty()) - resp := Message{ - Type: pmes.GetType(), - Response: true, - ID: pmes.GetId(), - } + // update the peer (on valid msgs only) + dht.Update(mPeer) - dht.netChan.Outgoing <- swarm.NewMessage(p, resp.ToProtobuf()) -} + // Print out diagnostic + u.DOut("[peer: %s]\nGot message type: '%s' [from = %s]\n", + dht.self.ID.Pretty(), + Message_MessageType_name[int32(pmes.GetType())], mPeer.ID.Pretty()) -func (dht *IpfsDHT) handleFindPeer(p *peer.Peer, pmes *PBDHTMessage) { - resp := Message{ - Type: pmes.GetType(), - ID: pmes.GetId(), - Response: true, - } - defer func() { - mes := swarm.NewMessage(p, resp.ToProtobuf()) - dht.netChan.Outgoing <- mes - }() - level 
:= pmes.GetValue()[0] - u.DOut("handleFindPeer: searching for '%s'\n", peer.ID(pmes.GetKey()).Pretty()) - closest := dht.routingTables[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey()))) - if closest == nil { - u.PErr("handleFindPeer: could not find anything.\n") - return + // get handler for this msg type. + var resp *Message + handler := dht.handlerForMsgType(pmes.GetType()) + if handler == nil { + return nil, errors.New("Recieved invalid message type") } - if len(closest.Addresses) == 0 { - u.PErr("handleFindPeer: no addresses for connected peer...\n") - return + // dispatch handler. + rpmes, err := handler(mPeer, pmes) + if err != nil { + return nil, err } - // If the found peer further away than this peer... - if kb.Closer(dht.self.ID, closest.ID, u.Key(pmes.GetKey())) { - return + // serialize response msg + rmes, err := msg.FromObject(mPeer, rpmes) + if err != nil { + return nil, fmt.Errorf("Failed to encode protobuf message: %v\n", err) } - u.DOut("handleFindPeer: sending back '%s'\n", closest.ID.Pretty()) - resp.Peers = []*peer.Peer{closest} - resp.Success = true + return rmes, nil } -func (dht *IpfsDHT) handleGetProviders(p *peer.Peer, pmes *PBDHTMessage) { - resp := Message{ - Type: PBDHTMessage_GET_PROVIDERS, - Key: pmes.GetKey(), - ID: pmes.GetId(), - Response: true, - } +// sendRequest sends out a request using dht.sender, but also makes sure to +// measure the RTT for latency measurements. 
+func (dht *IpfsDHT) sendRequest(ctx context.Context, p *peer.Peer, pmes *Message) (*Message, error) { - has, err := dht.datastore.Has(ds.NewKey(pmes.GetKey())) + mes, err := msg.FromObject(p, pmes) if err != nil { - dht.netChan.Errors <- err - } - - providers := dht.providers.GetProviders(u.Key(pmes.GetKey())) - if has { - providers = append(providers, dht.self) - } - if providers == nil || len(providers) == 0 { - level := 0 - if len(pmes.GetValue()) > 0 { - level = int(pmes.GetValue()[0]) - } - - closer := dht.routingTables[level].NearestPeer(kb.ConvertKey(u.Key(pmes.GetKey()))) - if kb.Closer(dht.self.ID, closer.ID, u.Key(pmes.GetKey())) { - resp.Peers = nil - } else { - resp.Peers = []*peer.Peer{closer} - } - } else { - resp.Peers = providers - resp.Success = true + return nil, err } - mes := swarm.NewMessage(p, resp.ToProtobuf()) - dht.netChan.Outgoing <- mes -} - -type providerInfo struct { - Creation time.Time - Value *peer.Peer -} + start := time.Now() -func (dht *IpfsDHT) handleAddProvider(p *peer.Peer, pmes *PBDHTMessage) { - key := u.Key(pmes.GetKey()) - u.DOut("[%s] Adding [%s] as a provider for '%s'\n", dht.self.ID.Pretty(), p.ID.Pretty(), peer.ID(key).Pretty()) - dht.providers.AddProvider(key, p) -} - -// Halt stops all communications from this peer and shut down -func (dht *IpfsDHT) Halt() { - dht.shutdown <- struct{}{} - dht.network.Close() - dht.providers.Halt() - dht.listener.Halt() -} - -// NOTE: not yet finished, low priority -func (dht *IpfsDHT) handleDiagnostic(p *peer.Peer, pmes *PBDHTMessage) { - seq := dht.routingTables[0].NearestPeers(kb.ConvertPeerID(dht.self.ID), 10) - listenChan := dht.listener.Listen(pmes.GetId(), len(seq), time.Second*30) - - for _, ps := range seq { - mes := swarm.NewMessage(ps, pmes) - dht.netChan.Outgoing <- mes + rmes, err := dht.sender.SendRequest(ctx, mes) + if err != nil { + return nil, err } - buf := new(bytes.Buffer) - di := dht.getDiagInfo() - buf.Write(di.Marshal()) - - // NOTE: this shouldnt be a hardcoded 
value - after := time.After(time.Second * 20) - count := len(seq) - for count > 0 { - select { - case <-after: - //Timeout, return what we have - goto out - case reqResp := <-listenChan: - pmesOut := new(PBDHTMessage) - err := proto.Unmarshal(reqResp.Data, pmesOut) - if err != nil { - // It broke? eh, whatever, keep going - continue - } - buf.Write(reqResp.Data) - count-- - } - } + rtt := time.Since(start) + rmes.Peer().SetLatency(rtt) -out: - resp := Message{ - Type: PBDHTMessage_DIAGNOSTIC, - ID: pmes.GetId(), - Value: buf.Bytes(), - Response: true, + rpmes := new(Message) + if err := proto.Unmarshal(rmes.Data(), rpmes); err != nil { + return nil, err } - mes := swarm.NewMessage(p, resp.ToProtobuf()) - dht.netChan.Outgoing <- mes + return rpmes, nil } -func (dht *IpfsDHT) getValueOrPeers(p *peer.Peer, key u.Key, timeout time.Duration, level int) ([]byte, []*peer.Peer, error) { - pmes, err := dht.getValueSingle(p, key, timeout, level) +func (dht *IpfsDHT) getValueOrPeers(ctx context.Context, p *peer.Peer, + key u.Key, level int) ([]byte, []*peer.Peer, error) { + + pmes, err := dht.getValueSingle(ctx, p, key, level) if err != nil { return nil, nil, err } - if pmes.GetSuccess() { - if pmes.Value == nil { // We were given provider[s] - val, err := dht.getFromPeerList(key, timeout, pmes.GetPeers(), level) - if err != nil { - return nil, nil, err - } - return val, nil, nil - } - + if value := pmes.GetValue(); value != nil { // Success! We were given the value - return pmes.GetValue(), nil, nil + return value, nil, nil } - // We were given a closer node + // TODO decide on providers. This probably shouldn't be happening. 
+ // if prv := pmes.GetProviderPeers(); prv != nil && len(prv) > 0 { + // val, err := dht.getFromPeerList(key, timeout,, level) + // if err != nil { + // return nil, nil, err + // } + // return val, nil, nil + // } + + // Perhaps we were given closer peers var peers []*peer.Peer - for _, pb := range pmes.GetPeers() { + for _, pb := range pmes.GetCloserPeers() { if peer.ID(pb.GetId()).Equal(dht.self.ID) { continue } + addr, err := ma.NewMultiaddr(pb.GetAddr()) if err != nil { u.PErr("%v\n", err.Error()) continue } - np, err := dht.network.GetConnection(peer.ID(pb.GetId()), addr) - if err != nil { - u.PErr("%v\n", err.Error()) - continue + // check if we already have this peer. + pr, _ := dht.peerstore.Get(peer.ID(pb.GetId())) + if pr == nil { + pr = &peer.Peer{ID: peer.ID(pb.GetId())} + dht.peerstore.Put(pr) } + pr.AddAddress(addr) // idempotent + peers = append(peers, pr) + } - peers = append(peers, np) + if len(peers) > 0 { + return nil, peers, nil } - return nil, peers, nil + + return nil, nil, errors.New("NotFound. 
did not get value or closer peers.") } // getValueSingle simply performs the get value RPC with the given parameters -func (dht *IpfsDHT) getValueSingle(p *peer.Peer, key u.Key, timeout time.Duration, level int) (*PBDHTMessage, error) { - pmes := Message{ - Type: PBDHTMessage_GET_VALUE, - Key: string(key), - Value: []byte{byte(level)}, - ID: swarm.GenerateMessageID(), - } - responseChan := dht.listener.Listen(pmes.ID, 1, time.Minute) - - mes := swarm.NewMessage(p, pmes.ToProtobuf()) - t := time.Now() - dht.netChan.Outgoing <- mes - - // Wait for either the response or a timeout - timeup := time.After(timeout) - select { - case <-timeup: - dht.listener.Unlisten(pmes.ID) - return nil, u.ErrTimeout - case resp, ok := <-responseChan: - if !ok { - u.PErr("response channel closed before timeout, please investigate.\n") - return nil, u.ErrTimeout - } - roundtrip := time.Since(t) - resp.Peer.SetLatency(roundtrip) - pmesOut := new(PBDHTMessage) - err := proto.Unmarshal(resp.Data, pmesOut) - if err != nil { - return nil, err - } - return pmesOut, nil - } +func (dht *IpfsDHT) getValueSingle(ctx context.Context, p *peer.Peer, + key u.Key, level int) (*Message, error) { + + pmes := newMessage(Message_GET_VALUE, string(key), level) + return dht.sendRequest(ctx, p, pmes) } // TODO: Im not certain on this implementation, we get a list of peers/providers // from someone what do we do with it? Connect to each of them? randomly pick // one to get the value from? Or just connect to one at a time until we get a // successful connection and request the value from it? 
-func (dht *IpfsDHT) getFromPeerList(key u.Key, timeout time.Duration, - peerlist []*PBDHTMessage_PBPeer, level int) ([]byte, error) { +func (dht *IpfsDHT) getFromPeerList(ctx context.Context, key u.Key, + peerlist []*Message_Peer, level int) ([]byte, error) { + for _, pinfo := range peerlist { - p, _ := dht.Find(peer.ID(pinfo.GetId())) - if p == nil { - maddr, err := ma.NewMultiaddr(pinfo.GetAddr()) - if err != nil { - u.PErr("getValue error: %s\n", err) - continue - } - - p, err = dht.network.GetConnection(peer.ID(pinfo.GetId()), maddr) - if err != nil { - u.PErr("getValue error: %s\n", err) - continue - } + p, err := dht.ensureConnectedToPeer(pinfo) + if err != nil { + u.DErr("getFromPeers error: %s\n", err) + continue } - pmes, err := dht.getValueSingle(p, key, timeout, level) + + pmes, err := dht.getValueSingle(ctx, p, key, level) if err != nil { u.DErr("getFromPeers error: %s\n", err) continue } - dht.providers.AddProvider(key, p) - // Make sure it was a successful get - if pmes.GetSuccess() && pmes.Value != nil { - return pmes.GetValue(), nil + if value := pmes.GetValue(); value != nil { + // Success! We were given the value + dht.providers.AddProvider(key, p) + return value, nil } } return nil, u.ErrNotFound @@ -510,31 +286,37 @@ func (dht *IpfsDHT) getLocal(key u.Key) ([]byte, error) { if err != nil { return nil, err } - return v.([]byte), nil + + byt, ok := v.([]byte) + if !ok { + return byt, errors.New("value stored in datastore not []byte") + } + return byt, nil } func (dht *IpfsDHT) putLocal(key u.Key, value []byte) error { return dht.datastore.Put(ds.NewKey(string(key)), value) } -// Update TODO(chas) Document this function +// Update signals to all routingTables to Update their last-seen status +// on the given peer. 
func (dht *IpfsDHT) Update(p *peer.Peer) { + removedCount := 0 for _, route := range dht.routingTables { removed := route.Update(p) - // Only drop the connection if no tables refer to this peer + // Only close the connection if no tables refer to this peer if removed != nil { - found := false - for _, r := range dht.routingTables { - if r.Find(removed.ID) != nil { - found = true - break - } - } - if !found { - dht.network.Drop(removed) - } + removedCount++ } } + + // Only close the connection if no tables refer to this peer + // if removedCount == len(dht.routingTables) { + // dht.network.ClosePeer(p) + // } + // ACTUALLY, no, let's not just close the connection. it may be connected + // due to other things. it seems that we just need connection timeouts + // after some deadline of inactivity. } // Find looks for a peer with a given ID connected to this dht and returns the peer and the table it was found in. @@ -548,34 +330,9 @@ func (dht *IpfsDHT) Find(id peer.ID) (*peer.Peer, *kb.RoutingTable) { return nil, nil } -func (dht *IpfsDHT) findPeerSingle(p *peer.Peer, id peer.ID, timeout time.Duration, level int) (*PBDHTMessage, error) { - pmes := Message{ - Type: PBDHTMessage_FIND_NODE, - Key: string(id), - ID: swarm.GenerateMessageID(), - Value: []byte{byte(level)}, - } - - mes := swarm.NewMessage(p, pmes.ToProtobuf()) - listenChan := dht.listener.Listen(pmes.ID, 1, time.Minute) - t := time.Now() - dht.netChan.Outgoing <- mes - after := time.After(timeout) - select { - case <-after: - dht.listener.Unlisten(pmes.ID) - return nil, u.ErrTimeout - case resp := <-listenChan: - roundtrip := time.Since(t) - resp.Peer.SetLatency(roundtrip) - pmesOut := new(PBDHTMessage) - err := proto.Unmarshal(resp.Data, pmesOut) - if err != nil { - return nil, err - } - - return pmesOut, nil - } +func (dht *IpfsDHT) findPeerSingle(ctx context.Context, p *peer.Peer, id peer.ID, level int) (*Message, error) { + pmes := newMessage(Message_FIND_NODE, string(id), level) + return 
dht.sendRequest(ctx, p, pmes) } func (dht *IpfsDHT) printTables() { @@ -584,67 +341,102 @@ func (dht *IpfsDHT) printTables() { } } -func (dht *IpfsDHT) findProvidersSingle(p *peer.Peer, key u.Key, level int, timeout time.Duration) (*PBDHTMessage, error) { - pmes := Message{ - Type: PBDHTMessage_GET_PROVIDERS, - Key: string(key), - ID: swarm.GenerateMessageID(), - Value: []byte{byte(level)}, - } - - mes := swarm.NewMessage(p, pmes.ToProtobuf()) - - listenChan := dht.listener.Listen(pmes.ID, 1, time.Minute) - dht.netChan.Outgoing <- mes - after := time.After(timeout) - select { - case <-after: - dht.listener.Unlisten(pmes.ID) - return nil, u.ErrTimeout - case resp := <-listenChan: - u.DOut("FindProviders: got response.\n") - pmesOut := new(PBDHTMessage) - err := proto.Unmarshal(resp.Data, pmesOut) - if err != nil { - return nil, err - } - - return pmesOut, nil - } +func (dht *IpfsDHT) findProvidersSingle(ctx context.Context, p *peer.Peer, key u.Key, level int) (*Message, error) { + pmes := newMessage(Message_GET_PROVIDERS, string(key), level) + return dht.sendRequest(ctx, p, pmes) } // TODO: Could be done async -func (dht *IpfsDHT) addPeerList(key u.Key, peers []*PBDHTMessage_PBPeer) []*peer.Peer { +func (dht *IpfsDHT) addProviders(key u.Key, peers []*Message_Peer) []*peer.Peer { var provArr []*peer.Peer for _, prov := range peers { - // Dont add outselves to the list - if peer.ID(prov.GetId()).Equal(dht.self.ID) { + p, err := dht.peerFromInfo(prov) + if err != nil { + u.PErr("error getting peer from info: %v\n", err) continue } - // Dont add someone who is already on the list - p := dht.network.Find(u.Key(prov.GetId())) - if p == nil { - u.DOut("given provider %s was not in our network already.\n", peer.ID(prov.GetId()).Pretty()) - var err error - p, err = dht.peerFromInfo(prov) - if err != nil { - u.PErr("error connecting to new peer: %s\n", err) - continue - } + + // Dont add outselves to the list + if p.ID.Equal(dht.self.ID) { + continue } + + // TODO(jbenet) 
ensure providers is idempotent dht.providers.AddProvider(key, p) provArr = append(provArr, p) } return provArr } -func (dht *IpfsDHT) peerFromInfo(pbp *PBDHTMessage_PBPeer) (*peer.Peer, error) { - maddr, err := ma.NewMultiaddr(pbp.GetAddr()) +// nearestPeerToQuery returns the routing tables closest peers. +func (dht *IpfsDHT) nearestPeerToQuery(pmes *Message) *peer.Peer { + level := pmes.GetClusterLevel() + cluster := dht.routingTables[level] + + key := u.Key(pmes.GetKey()) + closer := cluster.NearestPeer(kb.ConvertKey(key)) + return closer +} + +// betterPeerToQuery returns nearestPeerToQuery, but iff closer than self. +func (dht *IpfsDHT) betterPeerToQuery(pmes *Message) *peer.Peer { + closer := dht.nearestPeerToQuery(pmes) + + // no node? nil + if closer == nil { + return nil + } + + // == to self? nil + if closer.ID.Equal(dht.self.ID) { + u.DOut("Attempted to return self! this shouldnt happen...\n") + return nil + } + + // self is closer? nil + key := u.Key(pmes.GetKey()) + if kb.Closer(dht.self.ID, closer.ID, key) { + return nil + } + + // ok seems like a closer node. 
+ return closer +} + +func (dht *IpfsDHT) peerFromInfo(pbp *Message_Peer) (*peer.Peer, error) { + + id := peer.ID(pbp.GetId()) + p, _ := dht.peerstore.Get(id) + if p == nil { + p, _ = dht.Find(id) + if p != nil { + panic("somehow peer not getting into peerstore") + } + } + + if p == nil { + maddr, err := ma.NewMultiaddr(pbp.GetAddr()) + if err != nil { + return nil, err + } + + // create new Peer + p := &peer.Peer{ID: id} + p.AddAddress(maddr) + dht.peerstore.Put(p) + } + return p, nil +} + +func (dht *IpfsDHT) ensureConnectedToPeer(pbp *Message_Peer) (*peer.Peer, error) { + p, err := dht.peerFromInfo(pbp) if err != nil { return nil, err } - return dht.network.GetConnection(peer.ID(pbp.GetId()), maddr) + // dial connection + err = dht.network.DialPeer(p) + return p, err } func (dht *IpfsDHT) loadProvidableKeys() error { @@ -658,7 +450,7 @@ func (dht *IpfsDHT) loadProvidableKeys() error { return nil } -// Builds up list of peers by requesting random peer IDs +// Bootstrap builds up list of peers by requesting random peer IDs func (dht *IpfsDHT) Bootstrap() { id := make([]byte, 16) rand.Read(id) diff --git a/routing/dht/dht_test.go b/routing/dht/dht_test.go index f5323086448..b136ab3d6fd 100644 --- a/routing/dht/dht_test.go +++ b/routing/dht/dht_test.go @@ -6,9 +6,9 @@ import ( ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" ci "github.com/jbenet/go-ipfs/crypto" - identify "github.com/jbenet/go-ipfs/identify" + spipe "github.com/jbenet/go-ipfs/crypto/spipe" + swarm "github.com/jbenet/go-ipfs/net/swarm" peer "github.com/jbenet/go-ipfs/peer" - swarm "github.com/jbenet/go-ipfs/swarm" u "github.com/jbenet/go-ipfs/util" "bytes" @@ -36,7 +36,7 @@ func setupDHTS(n int, t *testing.T) ([]*ma.Multiaddr, []*peer.Peer, []*IpfsDHT) } p.PubKey = pk p.PrivKey = sk - id, err := identify.IDFromPubKey(pk) + id, err := spipe.IDFromPubKey(pk) if err != nil { panic(err) 
} @@ -68,7 +68,7 @@ func makePeer(addr *ma.Multiaddr) *peer.Peer { } p.PrivKey = sk p.PubKey = pk - id, err := identify.IDFromPubKey(pk) + id, err := spipe.IDFromPubKey(pk) if err != nil { panic(err) } diff --git a/routing/dht/ext_test.go b/routing/dht/ext_test.go index 82337bfa6f4..fe98443adbe 100644 --- a/routing/dht/ext_test.go +++ b/routing/dht/ext_test.go @@ -9,8 +9,8 @@ import ( ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" + swarm "github.com/jbenet/go-ipfs/net/swarm" peer "github.com/jbenet/go-ipfs/peer" - swarm "github.com/jbenet/go-ipfs/swarm" u "github.com/jbenet/go-ipfs/util" "time" diff --git a/routing/dht/handlers.go b/routing/dht/handlers.go new file mode 100644 index 00000000000..710478e4514 --- /dev/null +++ b/routing/dht/handlers.go @@ -0,0 +1,251 @@ +package dht + +import ( + "errors" + "fmt" + "time" + + msg "github.com/jbenet/go-ipfs/net/message" + peer "github.com/jbenet/go-ipfs/peer" + kb "github.com/jbenet/go-ipfs/routing/kbucket" + u "github.com/jbenet/go-ipfs/util" + + context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context" + ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/datastore.go" +) + +// dhthandler specifies the signature of functions that handle DHT messages. 
+type dhtHandler func(*peer.Peer, *Message) (*Message, error) + +func (dht *IpfsDHT) handlerForMsgType(t Message_MessageType) dhtHandler { + switch t { + case Message_GET_VALUE: + return dht.handleGetValue + case Message_PUT_VALUE: + return dht.handlePutValue + case Message_FIND_NODE: + return dht.handleFindPeer + case Message_ADD_PROVIDER: + return dht.handleAddProvider + case Message_GET_PROVIDERS: + return dht.handleGetProviders + case Message_PING: + return dht.handlePing + case Message_DIAGNOSTIC: + return dht.handleDiagnostic + default: + return nil + } +} + +func (dht *IpfsDHT) putValueToNetwork(p *peer.Peer, key string, value []byte) error { + typ := Message_PUT_VALUE + pmes := newMessage(Message_PUT_VALUE, string(key), 0) + pmes.Value = value + + mes, err := msg.FromObject(p, pmes) + if err != nil { + return err + } + return dht.sender.SendMessage(context.TODO(), mes) +} + +func (dht *IpfsDHT) handleGetValue(p *peer.Peer, pmes *Message) (*Message, error) { + u.DOut("handleGetValue for key: %s\n", pmes.GetKey()) + + // setup response + resp := newMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel()) + + // first, is the key even a key? + key := pmes.GetKey() + if key == "" { + return nil, errors.New("handleGetValue but no key was provided") + } + + // let's first check if we have the value locally. + dskey := ds.NewKey(pmes.GetKey()) + iVal, err := dht.datastore.Get(dskey) + + // if we got an unexpected error, bail. + if err != ds.ErrNotFound { + return nil, err + } + + // if we have the value, respond with it! + if err == nil { + u.DOut("handleGetValue success!\n") + + byts, ok := iVal.([]byte) + if !ok { + return nil, fmt.Errorf("datastore had non byte-slice value for %v", dskey) + } + + resp.Value = byts + return resp, nil + } + + // if we know any providers for the requested value, return those. 
+ provs := dht.providers.GetProviders(u.Key(pmes.GetKey())) + if len(provs) > 0 { + u.DOut("handleGetValue returning %d provider[s]\n", len(provs)) + resp.ProviderPeers = peersToPBPeers(provs) + return resp, nil + } + + // Find closest peer on given cluster to desired key and reply with that info + closer := dht.betterPeerToQuery(pmes) + if closer == nil { + u.DOut("handleGetValue could not find a closer node than myself.\n") + resp.CloserPeers = nil + return resp, nil + } + + // we got a closer peer, it seems. return it. + u.DOut("handleGetValue returning a closer peer: '%s'\n", closer.ID.Pretty()) + resp.CloserPeers = peersToPBPeers([]*peer.Peer{closer}) + return resp, nil +} + +// Store a value in this peer local storage +func (dht *IpfsDHT) handlePutValue(p *peer.Peer, pmes *Message) (*Message, error) { + dht.dslock.Lock() + defer dht.dslock.Unlock() + dskey := ds.NewKey(pmes.GetKey()) + err := dht.datastore.Put(dskey, pmes.GetValue()) + return nil, err +} + +func (dht *IpfsDHT) handlePing(p *peer.Peer, pmes *Message) (*Message, error) { + u.DOut("[%s] Responding to ping from [%s]!\n", dht.self.ID.Pretty(), p.ID.Pretty()) + + return newMessage(pmes.GetType(), "", int(pmes.GetClusterLevel())), nil +} + +func (dht *IpfsDHT) handleFindPeer(p *peer.Peer, pmes *Message) (*Message, error) { + resp := newMessage(pmes.GetType(), "", pmes.GetClusterLevel()) + var closest *peer.Peer + + // if looking for self... special case where we send it on CloserPeers. 
+ if peer.ID(pmes.GetKey()).Equal(dht.self.ID) { + closest = dht.self + } else { + closest = dht.betterPeerToQuery(pmes) + } + + if closest == nil { + u.PErr("handleFindPeer: could not find anything.\n") + return resp, nil + } + + if len(closest.Addresses) == 0 { + u.PErr("handleFindPeer: no addresses for connected peer...\n") + return resp, nil + } + + u.DOut("handleFindPeer: sending back '%s'\n", closest.ID.Pretty()) + resp.CloserPeers = peersToPBPeers([]*peer.Peer{closest}) + return resp, nil +} + +func (dht *IpfsDHT) handleGetProviders(p *peer.Peer, pmes *Message) (*Message, error) { + resp := newMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel()) + + // check if we have this value, to add ourselves as provider. + has, err := dht.datastore.Has(ds.NewKey(pmes.GetKey())) + if err != nil && err != ds.ErrNotFound { + u.PErr("unexpected datastore error: %v\n", err) + has = false + } + + // setup providers + providers := dht.providers.GetProviders(u.Key(pmes.GetKey())) + if has { + providers = append(providers, dht.self) + } + + // if we've got providers, send thos those. + if providers != nil && len(providers) > 0 { + resp.ProviderPeers = peersToPBPeers(providers) + } + + // Also send closer peers. 
+ closer := dht.betterPeerToQuery(pmes) + if closer != nil { + resp.CloserPeers = peersToPBPeers([]*peer.Peer{closer}) + } + + return resp, nil +} + +type providerInfo struct { + Creation time.Time + Value *peer.Peer +} + +func (dht *IpfsDHT) handleAddProvider(p *peer.Peer, pmes *Message) (*Message, error) { + key := u.Key(pmes.GetKey()) + + u.DOut("[%s] Adding [%s] as a provider for '%s'\n", + dht.self.ID.Pretty(), p.ID.Pretty(), peer.ID(key).Pretty()) + + dht.providers.AddProvider(key, p) + return nil, nil +} + +// Halt stops all communications from this peer and shut down +// TODO -- remove this in favor of context +func (dht *IpfsDHT) Halt() { + dht.shutdown <- struct{}{} + dht.network.Close() + dht.providers.Halt() +} + +// NOTE: not yet finished, low priority +func (dht *IpfsDHT) handleDiagnostic(p *peer.Peer, pmes *Message) (*Message, error) { + seq := dht.routingTables[0].NearestPeers(kb.ConvertPeerID(dht.self.ID), 10) + + for _, ps := range seq { + mes, err := msg.FromObject(ps, pmes) + if err != nil { + u.PErr("handleDiagnostics error creating message: %v\n", err) + continue + } + // dht.sender.SendRequest(context.TODO(), mes) + } + return nil, errors.New("not yet ported back") + + // buf := new(bytes.Buffer) + // di := dht.getDiagInfo() + // buf.Write(di.Marshal()) + // + // // NOTE: this shouldnt be a hardcoded value + // after := time.After(time.Second * 20) + // count := len(seq) + // for count > 0 { + // select { + // case <-after: + // //Timeout, return what we have + // goto out + // case reqResp := <-listenChan: + // pmesOut := new(Message) + // err := proto.Unmarshal(reqResp.Data, pmesOut) + // if err != nil { + // // It broke? 
eh, whatever, keep going + // continue + // } + // buf.Write(reqResp.Data) + // count-- + // } + // } + // + // out: + // resp := Message{ + // Type: Message_DIAGNOSTIC, + // ID: pmes.GetId(), + // Value: buf.Bytes(), + // Response: true, + // } + // + // mes := swarm.NewMessage(p, resp.ToProtobuf()) + // dht.netChan.Outgoing <- mes +} diff --git a/routing/dht/messages.pb.go b/routing/dht/messages.pb.go index 90c936eb912..b6e9fa4f26c 100644 --- a/routing/dht/messages.pb.go +++ b/routing/dht/messages.pb.go @@ -1,4 +1,4 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-gogo. // source: messages.proto // DO NOT EDIT! @@ -9,30 +9,32 @@ It is generated from these files: messages.proto It has these top-level messages: - PBDHTMessage + Message */ package dht -import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" +import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/proto" +import json "encoding/json" import math "math" -// Reference imports to suppress errors if they are not otherwise used. +// Reference proto, json, and math imports to suppress error if they are not otherwise used. 
var _ = proto.Marshal +var _ = &json.SyntaxError{} var _ = math.Inf -type PBDHTMessage_MessageType int32 +type Message_MessageType int32 const ( - PBDHTMessage_PUT_VALUE PBDHTMessage_MessageType = 0 - PBDHTMessage_GET_VALUE PBDHTMessage_MessageType = 1 - PBDHTMessage_ADD_PROVIDER PBDHTMessage_MessageType = 2 - PBDHTMessage_GET_PROVIDERS PBDHTMessage_MessageType = 3 - PBDHTMessage_FIND_NODE PBDHTMessage_MessageType = 4 - PBDHTMessage_PING PBDHTMessage_MessageType = 5 - PBDHTMessage_DIAGNOSTIC PBDHTMessage_MessageType = 6 + Message_PUT_VALUE Message_MessageType = 0 + Message_GET_VALUE Message_MessageType = 1 + Message_ADD_PROVIDER Message_MessageType = 2 + Message_GET_PROVIDERS Message_MessageType = 3 + Message_FIND_NODE Message_MessageType = 4 + Message_PING Message_MessageType = 5 + Message_DIAGNOSTIC Message_MessageType = 6 ) -var PBDHTMessage_MessageType_name = map[int32]string{ +var Message_MessageType_name = map[int32]string{ 0: "PUT_VALUE", 1: "GET_VALUE", 2: "ADD_PROVIDER", @@ -41,7 +43,7 @@ var PBDHTMessage_MessageType_name = map[int32]string{ 5: "PING", 6: "DIAGNOSTIC", } -var PBDHTMessage_MessageType_value = map[string]int32{ +var Message_MessageType_value = map[string]int32{ "PUT_VALUE": 0, "GET_VALUE": 1, "ADD_PROVIDER": 2, @@ -51,105 +53,107 @@ var PBDHTMessage_MessageType_value = map[string]int32{ "DIAGNOSTIC": 6, } -func (x PBDHTMessage_MessageType) Enum() *PBDHTMessage_MessageType { - p := new(PBDHTMessage_MessageType) +func (x Message_MessageType) Enum() *Message_MessageType { + p := new(Message_MessageType) *p = x return p } -func (x PBDHTMessage_MessageType) String() string { - return proto.EnumName(PBDHTMessage_MessageType_name, int32(x)) +func (x Message_MessageType) String() string { + return proto.EnumName(Message_MessageType_name, int32(x)) } -func (x *PBDHTMessage_MessageType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(PBDHTMessage_MessageType_value, data, "PBDHTMessage_MessageType") +func (x 
*Message_MessageType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Message_MessageType_value, data, "Message_MessageType") if err != nil { return err } - *x = PBDHTMessage_MessageType(value) + *x = Message_MessageType(value) return nil } -type PBDHTMessage struct { - Type *PBDHTMessage_MessageType `protobuf:"varint,1,req,name=type,enum=dht.PBDHTMessage_MessageType" json:"type,omitempty"` - Key *string `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` - Value []byte `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` - Id *string `protobuf:"bytes,4,req,name=id" json:"id,omitempty"` - Response *bool `protobuf:"varint,5,opt,name=response" json:"response,omitempty"` - Success *bool `protobuf:"varint,6,opt,name=success" json:"success,omitempty"` - Peers []*PBDHTMessage_PBPeer `protobuf:"bytes,7,rep,name=peers" json:"peers,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PBDHTMessage) Reset() { *m = PBDHTMessage{} } -func (m *PBDHTMessage) String() string { return proto.CompactTextString(m) } -func (*PBDHTMessage) ProtoMessage() {} - -func (m *PBDHTMessage) GetType() PBDHTMessage_MessageType { +type Message struct { + // defines what type of message it is. + Type *Message_MessageType `protobuf:"varint,1,req,name=type,enum=dht.Message_MessageType" json:"type,omitempty"` + // defines what coral cluster level this query/response belongs to. + ClusterLevelRaw *int32 `protobuf:"varint,10,opt,name=clusterLevelRaw" json:"clusterLevelRaw,omitempty"` + // Used to specify the key associated with this message. 
+ // PUT_VALUE, GET_VALUE, ADD_PROVIDER, GET_PROVIDERS + Key *string `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` + // Used to return a value + // PUT_VALUE, GET_VALUE + Value []byte `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` + // Used to return peers closer to a key in a query + // GET_VALUE, GET_PROVIDERS, FIND_NODE + CloserPeers []*Message_Peer `protobuf:"bytes,8,rep,name=closerPeers" json:"closerPeers,omitempty"` + // Used to return Providers + // GET_VALUE, ADD_PROVIDER, GET_PROVIDERS + ProviderPeers []*Message_Peer `protobuf:"bytes,9,rep,name=providerPeers" json:"providerPeers,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} + +func (m *Message) GetType() Message_MessageType { if m != nil && m.Type != nil { return *m.Type } - return PBDHTMessage_PUT_VALUE + return Message_PUT_VALUE } -func (m *PBDHTMessage) GetKey() string { +func (m *Message) GetClusterLevelRaw() int32 { + if m != nil && m.ClusterLevelRaw != nil { + return *m.ClusterLevelRaw + } + return 0 +} + +func (m *Message) GetKey() string { if m != nil && m.Key != nil { return *m.Key } return "" } -func (m *PBDHTMessage) GetValue() []byte { +func (m *Message) GetValue() []byte { if m != nil { return m.Value } return nil } -func (m *PBDHTMessage) GetId() string { - if m != nil && m.Id != nil { - return *m.Id - } - return "" -} - -func (m *PBDHTMessage) GetResponse() bool { - if m != nil && m.Response != nil { - return *m.Response - } - return false -} - -func (m *PBDHTMessage) GetSuccess() bool { - if m != nil && m.Success != nil { - return *m.Success +func (m *Message) GetCloserPeers() []*Message_Peer { + if m != nil { + return m.CloserPeers } - return false + return nil } -func (m *PBDHTMessage) GetPeers() []*PBDHTMessage_PBPeer { +func (m *Message) GetProviderPeers() []*Message_Peer { if m != nil { - return m.Peers + 
return m.ProviderPeers } return nil } -type PBDHTMessage_PBPeer struct { +type Message_Peer struct { Id *string `protobuf:"bytes,1,req,name=id" json:"id,omitempty"` Addr *string `protobuf:"bytes,2,req,name=addr" json:"addr,omitempty"` XXX_unrecognized []byte `json:"-"` } -func (m *PBDHTMessage_PBPeer) Reset() { *m = PBDHTMessage_PBPeer{} } -func (m *PBDHTMessage_PBPeer) String() string { return proto.CompactTextString(m) } -func (*PBDHTMessage_PBPeer) ProtoMessage() {} +func (m *Message_Peer) Reset() { *m = Message_Peer{} } +func (m *Message_Peer) String() string { return proto.CompactTextString(m) } +func (*Message_Peer) ProtoMessage() {} -func (m *PBDHTMessage_PBPeer) GetId() string { +func (m *Message_Peer) GetId() string { if m != nil && m.Id != nil { return *m.Id } return "" } -func (m *PBDHTMessage_PBPeer) GetAddr() string { +func (m *Message_Peer) GetAddr() string { if m != nil && m.Addr != nil { return *m.Addr } @@ -157,5 +161,5 @@ func (m *PBDHTMessage_PBPeer) GetAddr() string { } func init() { - proto.RegisterEnum("dht.PBDHTMessage_MessageType", PBDHTMessage_MessageType_name, PBDHTMessage_MessageType_value) + proto.RegisterEnum("dht.Message_MessageType", Message_MessageType_name, Message_MessageType_value) } diff --git a/routing/dht/messages.proto b/routing/dht/messages.proto index c2c5cc30d8f..3c33f9382b2 100644 --- a/routing/dht/messages.proto +++ b/routing/dht/messages.proto @@ -2,7 +2,7 @@ package dht; //run `protoc --go_out=. *.proto` to generate -message PBDHTMessage { +message Message { enum MessageType { PUT_VALUE = 0; GET_VALUE = 1; @@ -13,22 +13,30 @@ message PBDHTMessage { DIAGNOSTIC = 6; } - message PBPeer { + message Peer { required string id = 1; required string addr = 2; } + // defines what type of message it is. required MessageType type = 1; + + // defines what coral cluster level this query/response belongs to. + optional int32 clusterLevelRaw = 10; + + // Used to specify the key associated with this message. 
+ // PUT_VALUE, GET_VALUE, ADD_PROVIDER, GET_PROVIDERS optional string key = 2; - optional bytes value = 3; - // Unique ID of this message, used to match queries with responses - required string id = 4; + // Used to return a value + // PUT_VALUE, GET_VALUE + optional bytes value = 3; - // Signals whether or not this message is a response to another message - optional bool response = 5; - optional bool success = 6; + // Used to return peers closer to a key in a query + // GET_VALUE, GET_PROVIDERS, FIND_NODE + repeated Peer closerPeers = 8; - // Used for returning peers from queries (normally, peers closer to X) - repeated PBPeer peers = 7; + // Used to return Providers + // GET_VALUE, ADD_PROVIDER, GET_PROVIDERS + repeated Peer providerPeers = 9; } diff --git a/routing/dht/providers_test.go b/routing/dht/providers_test.go index 0cdfa4fcc78..8620bc88073 100644 --- a/routing/dht/providers_test.go +++ b/routing/dht/providers_test.go @@ -12,9 +12,14 @@ func TestProviderManager(t *testing.T) { p := NewProviderManager(mid) a := u.Key("test") p.AddProvider(a, &peer.Peer{}) - resp := p.GetProviders(a) - if len(resp) != 1 { - t.Fatal("Could not retrieve provider.") + remotePeers := p.GetProviders(a) + localPeers := p.GetLocal() + if len(remotePeers) != 1 { + t.Fatal("Could not retrieve remote provider.") } + if len(localPeers) != 1 { + t.Fatal("Could not retrieve local provider.") + } + p.Halt() } diff --git a/routing/dht/routing.go b/routing/dht/routing.go index 383c64a9892..49fdb06ee08 100644 --- a/routing/dht/routing.go +++ b/routing/dht/routing.go @@ -10,9 +10,9 @@ import ( ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" + swarm "github.com/jbenet/go-ipfs/net/swarm" peer "github.com/jbenet/go-ipfs/peer" kb "github.com/jbenet/go-ipfs/routing/kbucket" - swarm "github.com/jbenet/go-ipfs/swarm" u "github.com/jbenet/go-ipfs/util" ) @@ -261,7 +261,7 @@ func (dht *IpfsDHT) FindProviders(key u.Key, timeout time.Duration) ([]*peer.Pee } if 
pmes.GetSuccess() { u.DOut("Got providers back from findProviders call!\n") - provs := dht.addPeerList(key, pmes.GetPeers()) + provs := dht.addProviders(key, pmes.GetPeers()) ll.Success = true return provs, nil } diff --git a/routing/routing.go b/routing/routing.go index fdf3507491b..c8dc2772b4e 100644 --- a/routing/routing.go +++ b/routing/routing.go @@ -10,6 +10,7 @@ import ( // IpfsRouting is the routing module interface // It is implemented by things like DHTs, etc. type IpfsRouting interface { + FindProvidersAsync(u.Key, int, time.Duration) <-chan *peer.Peer // Basic Put/Get diff --git a/swarm/interface.go b/swarm/interface.go deleted file mode 100644 index 413a42ee216..00000000000 --- a/swarm/interface.go +++ /dev/null @@ -1,20 +0,0 @@ -package swarm - -import ( - peer "github.com/jbenet/go-ipfs/peer" - u "github.com/jbenet/go-ipfs/util" - - ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" -) - -type Network interface { - Find(u.Key) *peer.Peer - Listen() error - ConnectNew(*ma.Multiaddr) (*peer.Peer, error) - GetConnection(id peer.ID, addr *ma.Multiaddr) (*peer.Peer, error) - Error(error) - GetErrChan() chan error - GetChannel(PBWrapper_MessageType) *Chan - Close() - Drop(*peer.Peer) error -} diff --git a/swarm/mes_listener.go b/swarm/mes_listener.go deleted file mode 100644 index 97cabe810eb..00000000000 --- a/swarm/mes_listener.go +++ /dev/null @@ -1,123 +0,0 @@ -package swarm - -import ( - crand "crypto/rand" - "sync" - "time" - - u "github.com/jbenet/go-ipfs/util" -) - -type MessageListener struct { - listeners map[string]*listenInfo - haltchan chan struct{} - unlist chan string - nlist chan *listenInfo - send chan *respMes -} - -// GenerateMessageID creates and returns a new message ID -func GenerateMessageID() string { - buf := make([]byte, 16) - crand.Read(buf) - return string(buf) -} - -// The listen info struct holds information about a message that is being waited for -type listenInfo struct { - // Responses 
matching the listen ID will be sent through resp - resp chan *Message - - // count is the number of responses to listen for - count int - - // eol is the time at which this listener will expire - eol time.Time - - // sendlock is used to prevent conditions where we try to send on the resp - // channel as its being closed by a timeout in another thread - sendLock sync.Mutex - - closed bool - - id string -} - -func NewMessageListener() *MessageListener { - ml := new(MessageListener) - ml.haltchan = make(chan struct{}) - ml.listeners = make(map[string]*listenInfo) - ml.nlist = make(chan *listenInfo, 16) - ml.send = make(chan *respMes, 16) - ml.unlist = make(chan string, 16) - go ml.run() - return ml -} - -func (ml *MessageListener) Listen(id string, count int, timeout time.Duration) <-chan *Message { - li := new(listenInfo) - li.count = count - li.eol = time.Now().Add(timeout) - li.resp = make(chan *Message, count) - li.id = id - ml.nlist <- li - return li.resp -} - -func (ml *MessageListener) Unlisten(id string) { - ml.unlist <- id -} - -type respMes struct { - id string - mes *Message -} - -func (ml *MessageListener) Respond(id string, mes *Message) { - ml.send <- &respMes{ - id: id, - mes: mes, - } -} - -func (ml *MessageListener) Halt() { - ml.haltchan <- struct{}{} -} - -func (ml *MessageListener) run() { - for { - select { - case <-ml.haltchan: - return - case id := <-ml.unlist: - trg, ok := ml.listeners[id] - if !ok { - continue - } - close(trg.resp) - delete(ml.listeners, id) - case li := <-ml.nlist: - ml.listeners[li.id] = li - case s := <-ml.send: - trg, ok := ml.listeners[s.id] - if !ok { - u.DOut("Send with no listener.") - continue - } - - if time.Now().After(trg.eol) { - close(trg.resp) - delete(ml.listeners, s.id) - continue - } - - trg.resp <- s.mes - trg.count-- - - if trg.count == 0 { - close(trg.resp) - delete(ml.listeners, s.id) - } - } - } -} diff --git a/swarm/mes_listener_test.go b/swarm/mes_listener_test.go deleted file mode 100644 index 
566011aa930..00000000000 --- a/swarm/mes_listener_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package swarm - -import ( - "testing" - "time" - - peer "github.com/jbenet/go-ipfs/peer" -) - -// Ensure that the Message Listeners basic functionality works -func TestMessageListener(t *testing.T) { - ml := NewMessageListener() - a := GenerateMessageID() - resp := ml.Listen(a, 1, time.Minute) - - pmes := new(PBWrapper) - pmes.Message = []byte("Hello") - pmes.Type = new(PBWrapper_MessageType) - mes := NewMessage(new(peer.Peer), pmes) - - go ml.Respond(a, mes) - - del := time.After(time.Millisecond * 100) - select { - case get := <-resp: - if string(get.Data) != string(mes.Data) { - t.Fatal("Something got really messed up") - } - case <-del: - t.Fatal("Waiting on message response timed out.") - } -} diff --git a/swarm/mes_wrapper.pb.go b/swarm/mes_wrapper.pb.go deleted file mode 100644 index f218a448aa3..00000000000 --- a/swarm/mes_wrapper.pb.go +++ /dev/null @@ -1,85 +0,0 @@ -// Code generated by protoc-gen-go. -// source: mes_wrapper.proto -// DO NOT EDIT! - -/* -Package swarm is a generated protocol buffer package. - -It is generated from these files: - mes_wrapper.proto - -It has these top-level messages: - PBWrapper -*/ -package swarm - -import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = math.Inf - -type PBWrapper_MessageType int32 - -const ( - PBWrapper_TEST PBWrapper_MessageType = 0 - PBWrapper_DHT_MESSAGE PBWrapper_MessageType = 1 - PBWrapper_BITSWAP PBWrapper_MessageType = 2 -) - -var PBWrapper_MessageType_name = map[int32]string{ - 0: "TEST", - 1: "DHT_MESSAGE", - 2: "BITSWAP", -} -var PBWrapper_MessageType_value = map[string]int32{ - "TEST": 0, - "DHT_MESSAGE": 1, - "BITSWAP": 2, -} - -func (x PBWrapper_MessageType) Enum() *PBWrapper_MessageType { - p := new(PBWrapper_MessageType) - *p = x - return p -} -func (x PBWrapper_MessageType) String() string { - return proto.EnumName(PBWrapper_MessageType_name, int32(x)) -} -func (x *PBWrapper_MessageType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(PBWrapper_MessageType_value, data, "PBWrapper_MessageType") - if err != nil { - return err - } - *x = PBWrapper_MessageType(value) - return nil -} - -type PBWrapper struct { - Type *PBWrapper_MessageType `protobuf:"varint,1,req,enum=swarm.PBWrapper_MessageType" json:"Type,omitempty"` - Message []byte `protobuf:"bytes,2,req" json:"Message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PBWrapper) Reset() { *m = PBWrapper{} } -func (m *PBWrapper) String() string { return proto.CompactTextString(m) } -func (*PBWrapper) ProtoMessage() {} - -func (m *PBWrapper) GetType() PBWrapper_MessageType { - if m != nil && m.Type != nil { - return *m.Type - } - return PBWrapper_TEST -} - -func (m *PBWrapper) GetMessage() []byte { - if m != nil { - return m.Message - } - return nil -} - -func init() { - proto.RegisterEnum("swarm.PBWrapper_MessageType", PBWrapper_MessageType_name, PBWrapper_MessageType_value) -} diff --git a/swarm/mes_wrapper.proto b/swarm/mes_wrapper.proto deleted file mode 100644 index ab72232f64f..00000000000 --- a/swarm/mes_wrapper.proto +++ /dev/null @@ -1,12 +0,0 @@ -package swarm; - -message PBWrapper { - enum MessageType { - TEST = 0; - DHT_MESSAGE = 1; - BITSWAP = 2; - } 
- - required MessageType Type = 1; - required bytes Message = 2; -} diff --git a/swarm/swarm.go b/swarm/swarm.go deleted file mode 100644 index 38945370328..00000000000 --- a/swarm/swarm.go +++ /dev/null @@ -1,506 +0,0 @@ -package swarm - -import ( - "errors" - "fmt" - "net" - "sync" - - proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" - ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr" - ident "github.com/jbenet/go-ipfs/identify" - peer "github.com/jbenet/go-ipfs/peer" - u "github.com/jbenet/go-ipfs/util" -) - -var ErrAlreadyOpen = errors.New("Error: Connection to this peer already open.") - -// Message represents a packet of information sent to or received from a -// particular Peer. -type Message struct { - // To or from, depending on direction. - Peer *peer.Peer - - // Opaque data - Data []byte -} - -// Cleaner looking helper function to make a new message struct -func NewMessage(p *peer.Peer, data proto.Message) *Message { - bytes, err := proto.Marshal(data) - if err != nil { - u.PErr("%v\n", err.Error()) - return nil - } - return &Message{ - Peer: p, - Data: bytes, - } -} - -// Chan is a swarm channel, which provides duplex communication and errors. -type Chan struct { - Outgoing chan *Message - Incoming chan *Message - Errors chan error - Close chan bool -} - -// NewChan constructs a Chan instance, with given buffer size bufsize. 
-func NewChan(bufsize int) *Chan { - return &Chan{ - Outgoing: make(chan *Message, bufsize), - Incoming: make(chan *Message, bufsize), - Errors: make(chan error, bufsize), - Close: make(chan bool, bufsize), - } -} - -// Contains a set of errors mapping to each of the swarms addresses -// that were listened on -type SwarmListenErr struct { - Errors []error -} - -func (se *SwarmListenErr) Error() string { - if se == nil { - return "" - } - var out string - for i, v := range se.Errors { - if v != nil { - out += fmt.Sprintf("%d: %s\n", i, v) - } - } - return out -} - -// Swarm is a connection muxer, allowing connections to other peers to -// be opened and closed, while still using the same Chan for all -// communication. The Chan sends/receives Messages, which note the -// destination or source Peer. -type Swarm struct { - Chan *Chan - conns ConnMap - connsLock sync.RWMutex - - filterChans map[PBWrapper_MessageType]*Chan - toFilter chan *Message - newFilters chan *newFilterInfo - - local *peer.Peer - listeners []net.Listener - haltroute chan struct{} -} - -// NewSwarm constructs a Swarm, with a Chan. 
-func NewSwarm(local *peer.Peer) *Swarm { - s := &Swarm{ - Chan: NewChan(10), - conns: ConnMap{}, - local: local, - filterChans: make(map[PBWrapper_MessageType]*Chan), - toFilter: make(chan *Message, 32), - newFilters: make(chan *newFilterInfo), - haltroute: make(chan struct{}), - } - go s.routeMessages() - go s.fanOut() - return s -} - -// Open listeners for each network the swarm should listen on -func (s *Swarm) Listen() error { - var ret_err *SwarmListenErr - for i, addr := range s.local.Addresses { - err := s.connListen(addr) - if err != nil { - if ret_err == nil { - ret_err = new(SwarmListenErr) - ret_err.Errors = make([]error, len(s.local.Addresses)) - } - ret_err.Errors[i] = err - u.PErr("Failed to listen on: %s [%s]", addr, err) - } - } - if ret_err == nil { - return nil - } - return ret_err -} - -// Listen for new connections on the given multiaddr -func (s *Swarm) connListen(maddr *ma.Multiaddr) error { - netstr, addr, err := maddr.DialArgs() - if err != nil { - return err - } - - list, err := net.Listen(netstr, addr) - if err != nil { - return err - } - - // NOTE: this may require a lock around it later. 
currently, only run on setup - s.listeners = append(s.listeners, list) - - // Accept and handle new connections on this listener until it errors - go func() { - for { - nconn, err := list.Accept() - if err != nil { - e := fmt.Errorf("Failed to accept connection: %s - %s [%s]", - netstr, addr, err) - go func() { s.Chan.Errors <- e }() - return - } - go s.handleNewConn(nconn) - } - }() - - return nil -} - -// Handle getting ID from this peer and adding it into the map -func (s *Swarm) handleNewConn(nconn net.Conn) { - p := new(peer.Peer) - - conn := &Conn{ - Peer: p, - Addr: nil, - Conn: nconn, - } - newConnChans(conn) - - sin, sout, err := ident.Handshake(s.local, p, conn.Incoming.MsgChan, conn.Outgoing.MsgChan) - if err != nil { - u.PErr("%v\n", err.Error()) - conn.Close() - return - } - - // Get address to contact remote peer from - addr := <-sin - maddr, err := ma.NewMultiaddr(string(addr)) - if err != nil { - u.PErr("Got invalid address from peer.") - s.Error(err) - return - } - p.AddAddress(maddr) - - conn.secIn = sin - conn.secOut = sout - - err = s.StartConn(conn) - if err != nil { - s.Error(err) - } -} - -// Close closes a swarm. -func (s *Swarm) Close() { - s.connsLock.RLock() - l := len(s.conns) - s.connsLock.RUnlock() - - for i := 0; i < l; i++ { - s.Chan.Close <- true // fan ins - } - s.Chan.Close <- true // fan out - s.Chan.Close <- true // listener - - for _, list := range s.listeners { - list.Close() - } - - s.haltroute <- struct{}{} - - for _, filter := range s.filterChans { - filter.Close <- true - } -} - -// Dial connects to a peer. -// -// The idea is that the client of Swarm does not need to know what network -// the connection will happen over. Swarm can use whichever it choses. -// This allows us to use various transport protocols, do NAT traversal/relay, -// etc. to achive connection. -// -// For now, Dial uses only TCP. This will be extended. 
-func (s *Swarm) Dial(peer *peer.Peer) (*Conn, error, bool) { - k := peer.Key() - - // check if we already have an open connection first - s.connsLock.RLock() - conn, found := s.conns[k] - s.connsLock.RUnlock() - if found { - return conn, nil, true - } - - // open connection to peer - conn, err := Dial("tcp", peer) - if err != nil { - return nil, err, false - } - - return conn, nil, false -} - -// StartConn adds the passed in connection to its peerMap and starts -// the fanIn routine for that connection -func (s *Swarm) StartConn(conn *Conn) error { - if conn == nil { - return errors.New("Tried to start nil connection.") - } - - u.DOut("Starting connection: %s\n", conn.Peer.Key().Pretty()) - // add to conns - s.connsLock.Lock() - if _, ok := s.conns[conn.Peer.Key()]; ok { - s.connsLock.Unlock() - return ErrAlreadyOpen - } - s.conns[conn.Peer.Key()] = conn - s.connsLock.Unlock() - - // kick off reader goroutine - go s.fanIn(conn) - return nil -} - -// Handles the unwrapping + sending of messages to the right connection. -func (s *Swarm) fanOut() { - for { - select { - case <-s.Chan.Close: - return // told to close. - case msg, ok := <-s.Chan.Outgoing: - if !ok { - return - } - - if len(msg.Data) > MaxMessageSize { - s.Error(fmt.Errorf("Exceeded max message size! (tried to send len = %d)", len(msg.Data))) - } - - s.connsLock.RLock() - conn, found := s.conns[msg.Peer.Key()] - s.connsLock.RUnlock() - - if !found { - e := fmt.Errorf("Sent msg to peer without open conn: %v", - msg.Peer) - s.Chan.Errors <- e - continue - } - - // queue it in the connection's buffer - conn.secOut <- msg.Data - } - } -} - -// Handles the receiving + wrapping of messages, per conn. -// Consider using reflect.Select with one goroutine instead of n. -func (s *Swarm) fanIn(conn *Conn) { - for { - select { - case <-s.Chan.Close: - // close Conn. 
- conn.Close() - goto out - - case <-conn.Closed: - goto out - - case data, ok := <-conn.secIn: - if !ok { - e := fmt.Errorf("Error retrieving from conn: %v", conn.Peer.Key().Pretty()) - s.Chan.Errors <- e - goto out - } - - msg := &Message{Peer: conn.Peer, Data: data} - s.toFilter <- msg - } - } -out: - - s.connsLock.Lock() - delete(s.conns, conn.Peer.Key()) - s.connsLock.Unlock() -} - -type newFilterInfo struct { - Type PBWrapper_MessageType - resp chan *Chan -} - -func (s *Swarm) routeMessages() { - for { - select { - case mes, ok := <-s.toFilter: - if !ok { - return - } - wrapper, err := Unwrap(mes.Data) - if err != nil { - u.PErr("error in route messages: %s\n", err) - } - - ch, ok := s.filterChans[PBWrapper_MessageType(wrapper.GetType())] - if !ok { - u.PErr("Received message with invalid type: %d\n", wrapper.GetType()) - continue - } - - mes.Data = wrapper.GetMessage() - ch.Incoming <- mes - case gchan := <-s.newFilters: - nch, ok := s.filterChans[gchan.Type] - if !ok { - nch = NewChan(16) - s.filterChans[gchan.Type] = nch - go s.muxChan(nch, gchan.Type) - } - gchan.resp <- nch - case <-s.haltroute: - return - } - } -} - -func (s *Swarm) muxChan(ch *Chan, typ PBWrapper_MessageType) { - for { - select { - case <-ch.Close: - return - case mes := <-ch.Outgoing: - data, err := Wrap(mes.Data, typ) - if err != nil { - u.PErr("muxChan error: %s\n", err) - continue - } - mes.Data = data - s.Chan.Outgoing <- mes - } - } -} - -func (s *Swarm) Find(key u.Key) *peer.Peer { - s.connsLock.RLock() - defer s.connsLock.RUnlock() - conn, found := s.conns[key] - if !found { - return nil - } - return conn.Peer -} - -// GetConnection will check if we are already connected to the peer in question -// and only open a new connection if we arent already -func (s *Swarm) GetConnection(id peer.ID, addr *ma.Multiaddr) (*peer.Peer, error) { - p := &peer.Peer{ - ID: id, - Addresses: []*ma.Multiaddr{addr}, - } - - if id.Equal(s.local.ID) { - panic("Attempted connection to self!") - } - - 
conn, err, reused := s.Dial(p) - if err != nil { - return nil, err - } - - if reused { - return p, nil - } - - err = s.handleDialedCon(conn) - return conn.Peer, err -} - -// Handle performing a handshake on a new connection and ensuring proper forward communication -func (s *Swarm) handleDialedCon(conn *Conn) error { - sin, sout, err := ident.Handshake(s.local, conn.Peer, conn.Incoming.MsgChan, conn.Outgoing.MsgChan) - if err != nil { - return err - } - - // Send node an address that you can be reached on - myaddr := s.local.NetAddress("tcp") - mastr, err := myaddr.String() - if err != nil { - return errors.New("No local address to send to peer.") - } - - sout <- []byte(mastr) - - conn.secIn = sin - conn.secOut = sout - - s.StartConn(conn) - - return nil -} - -// ConnectNew is for connecting to a peer when you dont know their ID, -// Should only be used when you are sure that you arent already connected to peer in question -func (s *Swarm) ConnectNew(addr *ma.Multiaddr) (*peer.Peer, error) { - if addr == nil { - return nil, errors.New("nil Multiaddr passed to swarm.Connect()") - } - npeer := new(peer.Peer) - npeer.AddAddress(addr) - - conn, err := Dial("tcp", npeer) - if err != nil { - return nil, err - } - - err = s.handleDialedCon(conn) - return npeer, err -} - -// Removes a given peer from the swarm and closes connections to it -func (s *Swarm) Drop(p *peer.Peer) error { - u.DOut("Dropping peer: [%s]\n", p.ID.Pretty()) - s.connsLock.RLock() - conn, found := s.conns[u.Key(p.ID)] - s.connsLock.RUnlock() - if !found { - return u.ErrNotFound - } - - s.connsLock.Lock() - delete(s.conns, u.Key(p.ID)) - s.connsLock.Unlock() - - return conn.Close() -} - -func (s *Swarm) Error(e error) { - s.Chan.Errors <- e -} - -func (s *Swarm) GetErrChan() chan error { - return s.Chan.Errors -} - -func (s *Swarm) GetChannel(typ PBWrapper_MessageType) *Chan { - nfi := &newFilterInfo{ - Type: typ, - resp: make(chan *Chan), - } - s.newFilters <- nfi - - return <-nfi.resp -} - -// 
Temporary to ensure that the Swarm always matches the Network interface as we are changing it -var _ Network = &Swarm{} diff --git a/swarm/wrapper.go b/swarm/wrapper.go deleted file mode 100644 index 469620e8ba6..00000000000 --- a/swarm/wrapper.go +++ /dev/null @@ -1,24 +0,0 @@ -package swarm - -import "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto" - -func Wrap(data []byte, typ PBWrapper_MessageType) ([]byte, error) { - wrapper := new(PBWrapper) - wrapper.Message = data - wrapper.Type = &typ - b, err := proto.Marshal(wrapper) - if err != nil { - return nil, err - } - return b, nil -} - -func Unwrap(data []byte) (*PBWrapper, error) { - mes := new(PBWrapper) - err := proto.Unmarshal(data, mes) - if err != nil { - return nil, err - } - - return mes, nil -} diff --git a/util/testutil/blocks.go b/util/testutil/blocks.go new file mode 100644 index 00000000000..05b69239998 --- /dev/null +++ b/util/testutil/blocks.go @@ -0,0 +1,22 @@ +package testutil + +import ( + "testing" + + blocks "github.com/jbenet/go-ipfs/blocks" +) + +// NewBlockOrFail returns a block created from msgData. Signals test failure if +// creation fails. +// +// NB: NewBlockOrFail accepts a msgData parameter to avoid non-determinism in +// tests. Generating random block data could potentially result in unexpected +// behavior in tests. Thus, it is left up to the caller to select the msgData +// that will determine the blocks key. +func NewBlockOrFail(t *testing.T, msgData string) blocks.Block { + block, blockCreationErr := blocks.NewBlock([]byte(msgData)) + if blockCreationErr != nil { + t.Fatal(blockCreationErr) + } + return *block +}