diff --git a/cmd/swarm/main.go b/cmd/swarm/main.go index 886895852ffb..3ae45d4c2feb 100644 --- a/cmd/swarm/main.go +++ b/cmd/swarm/main.go @@ -560,7 +560,7 @@ func registerBzzService(bzzconfig *bzzapi.Config, ctx *cli.Context, stack *node. } // In production, mockStore must be always nil. - return swarm.NewSwarm(ctx, swapClient, ensClient, bzzconfig, bzzconfig.SwapEnabled, bzzconfig.SyncEnabled, bzzconfig.Cors, bzzconfig.PssEnabled, nil) + return swarm.NewSwarm(ctx, swapClient, ensClient, bzzconfig, nil) } //register within the ethereum node if err := stack.Register(boot); err != nil { diff --git a/swarm/api/api.go b/swarm/api/api.go index 8c4bca2ec0e0..572b150da225 100644 --- a/swarm/api/api.go +++ b/swarm/api/api.go @@ -17,6 +17,7 @@ package api import ( + "context" "fmt" "io" "net/http" @@ -46,15 +47,17 @@ on top of the dpa it is the public interface of the dpa which is included in the ethereum stack */ type Api struct { - dpa *storage.DPA - dns Resolver + resource *storage.ResourceHandler + dpa *storage.DPA + dns Resolver } //the api constructor initialises -func NewApi(dpa *storage.DPA, dns Resolver) (self *Api) { +func NewApi(dpa *storage.DPA, dns Resolver, resourceHandler *storage.ResourceHandler) (self *Api) { self = &Api{ - dpa: dpa, - dns: dns, + dpa: dpa, + dns: dns, + resource: resourceHandler, } return } @@ -361,3 +364,50 @@ func (self *Api) BuildDirectoryTree(mhash string, nameresolver bool) (key storag } return key, manifestEntryMap, nil } + +// Look up mutable resource updates at specific periods and versions +func (self *Api) ResourceLookup(ctx context.Context, name string, period uint32, version uint32) (storage.Key, []byte, error) { + var err error + if version != 0 { + if period == 0 { + currentblocknumber, err := self.resource.GetBlock(ctx) + if err != nil { + return nil, nil, fmt.Errorf("Could not determine latest block: %v", err) + } + period = self.resource.BlockToPeriod(name, currentblocknumber) + } + _, err = self.resource.LookupVersionByName(ctx, name, period, version, true) + } else if period != 0 { + _, err = self.resource.LookupHistoricalByName(ctx, name, period, true) + } else { + _, err = self.resource.LookupLatestByName(ctx, name, true) + } + if err != nil { + return nil, nil, err + } + return self.resource.GetContent(name) +} + +func (self *Api) ResourceCreate(ctx context.Context, name string, frequency uint64) (storage.Key, error) { + rsrc, err := self.resource.NewResource(ctx, name, frequency) + if err != nil { + return nil, err + } + h := rsrc.NameHash() + return storage.Key(h[:]), nil +} + +func (self *Api) ResourceUpdate(ctx context.Context, name string, data []byte) (storage.Key, uint32, uint32, error) { + key, err := self.resource.Update(ctx, name, data) + period, _ := self.resource.GetLastPeriod(name) + version, _ := self.resource.GetVersion(name) + return key, period, version, err +} + +func (self *Api) ResourceHashSize() int { + return self.resource.HashSize() +} + +func (self *Api) ResourceIsValidated() bool { + return self.resource.IsValidated() +} diff --git a/swarm/api/api_test.go b/swarm/api/api_test.go index e673f76c4267..57c8e88f2951 100644 --- a/swarm/api/api_test.go +++ b/swarm/api/api_test.go @@ -40,7 +40,7 @@ func testApi(t *testing.T, f func(*Api)) { if err != nil { return } - api := NewApi(dpa, nil) + api := NewApi(dpa, nil, nil) dpa.Start() f(api) dpa.Stop() diff --git a/swarm/api/config.go b/swarm/api/config.go index d4dba36094fa..14906176e992 100644 --- a/swarm/api/config.go +++ b/swarm/api/config.go @@ -46,22 +46,24 @@ type 
Config struct { *network.HiveParams Swap *swap.SwapParams //*network.SyncParams - Contract common.Address - EnsRoot common.Address - EnsApi string - Path string - ListenAddr string - Port string - PublicKey string - BzzKey string - NetworkId uint64 - SwapEnabled bool - SyncEnabled bool - PssEnabled bool - SwapApi string - Cors string - BzzAccount string - BootNodes string + Contract common.Address + EnsRoot common.Address + EnsApi string + Path string + ListenAddr string + Port string + PublicKey string + BzzKey string + NetworkId uint64 + SwapEnabled bool + SyncEnabled bool + PssEnabled bool + ResourceEnabled bool + SwapApi string + Cors string + BzzAccount string + BootNodes string + privateKey *ecdsa.PrivateKey } //create a default config with all parameters to set to defaults @@ -72,18 +74,19 @@ func NewConfig() (self *Config) { ChunkerParams: storage.NewChunkerParams(), HiveParams: network.NewHiveParams(), //SyncParams: network.NewDefaultSyncParams(), - Swap: swap.NewDefaultSwapParams(), - ListenAddr: DefaultHTTPListenAddr, - Port: DefaultHTTPPort, - Path: node.DefaultDataDir(), - EnsApi: node.DefaultIPCEndpoint("geth"), - EnsRoot: ens.TestNetAddress, - NetworkId: network.NetworkID, - SwapEnabled: false, - SyncEnabled: true, - PssEnabled: true, - SwapApi: "", - BootNodes: "", + Swap: swap.NewDefaultSwapParams(), + ListenAddr: DefaultHTTPListenAddr, + Port: DefaultHTTPPort, + Path: node.DefaultDataDir(), + EnsApi: node.DefaultIPCEndpoint("geth"), + EnsRoot: ens.TestNetAddress, + NetworkId: network.NetworkID, + SwapEnabled: false, + SyncEnabled: true, + PssEnabled: true, + ResourceEnabled: true, + SwapApi: "", + BootNodes: "", } return @@ -108,8 +111,17 @@ func (self *Config) Init(prvKey *ecdsa.PrivateKey) { self.PublicKey = pubkeyhex self.BzzKey = keyhex - self.Swap.Init(self.Contract, prvKey) - //self.SyncParams.Init(self.Path) - //self.HiveParams.Init(self.Path) + if self.SwapEnabled { + self.Swap.Init(self.Contract, prvKey) + } + self.privateKey = prvKey self.StoreParams.Init(self.Path) } + +func (self *Config) ShiftPrivateKey() (privKey *ecdsa.PrivateKey) { + if self.privateKey != nil { + privKey = self.privateKey + self.privateKey = nil + } + return privKey +} diff --git a/swarm/api/config_test.go b/swarm/api/config_test.go index 4851f19fc528..5a5f176c0b14 100644 --- a/swarm/api/config_test.go +++ b/swarm/api/config_test.go @@ -49,16 +49,9 @@ func TestConfig(t *testing.T) { if one.PublicKey == "" { t.Fatal("Expected PublicKey to be set") } - - //the Init function should append subdirs to the given path - if one.Swap.PayProfile.Beneficiary == (common.Address{}) { + if one.Swap.PayProfile.Beneficiary == (common.Address{}) && one.SwapEnabled { t.Fatal("Failed to correctly initialize SwapParams") } - - if one.HiveParams.MaxPeersPerRequest != 5 { - t.Fatal("Failed to correctly initialize HiveParams") - } - if one.StoreParams.ChunkDbPath == one.Path { t.Fatal("Failed to correctly initialize StoreParams") } diff --git a/swarm/api/http/server.go b/swarm/api/http/server.go index 74341899d22b..a71edfdd23d8 100644 --- a/swarm/api/http/server.go +++ b/swarm/api/http/server.go @@ -21,6 +21,7 @@ package http import ( "archive/tar" + "bytes" "encoding/json" "errors" "fmt" @@ -290,6 +291,95 @@ func (s *Server) HandleDelete(w http.ResponseWriter, r *Request) { fmt.Fprint(w, newKey) } +func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { + var outdata string + if r.uri.Path != "" { + frequency, err := strconv.ParseUint(r.uri.Path, 10, 64) + if err != nil { + s.BadRequest(w, r, 
fmt.Sprintf("Cannot parse frequency parameter: %v", err)) + return + } + key, err := s.api.ResourceCreate(r.Context(), r.uri.Addr, frequency) + if err != nil { + s.Error(w, r, fmt.Errorf("Resource creation failed: %v", err)) + return + } + outdata = key.Hex() + } + + data, err := ioutil.ReadAll(r.Body) + if err != nil { + s.Error(w, r, err) + return + } + _, _, _, err = s.api.ResourceUpdate(r.Context(), r.uri.Addr, data) + if err != nil { + s.Error(w, r, fmt.Errorf("Update resource failed: %v", err)) + return + } + + if outdata != "" { + w.Header().Add("Content-type", "text/plain") + w.WriteHeader(http.StatusOK) + fmt.Fprint(w, outdata) + return + } + w.WriteHeader(http.StatusOK) +} + +// Retrieve mutable resource updates: +// bzz-resource:// - get latest update +// bzz-resource:/// - get latest update on period n +// bzz-resource://// - get update version m of period n +// = ens name or hash +func (s *Server) HandleGetResource(w http.ResponseWriter, r *Request) { + s.handleGetResource(w, r, r.uri.Addr) +} + +func (s *Server) handleGetResource(w http.ResponseWriter, r *Request, name string) { + var params []string + if len(r.uri.Path) > 0 { + params = strings.Split(r.uri.Path, "/") + } + var updateKey storage.Key + var period uint64 + var version uint64 + var data []byte + var err error + now := time.Now() + log.Debug("handlegetdb", "name", name) + switch len(params) { + case 0: + updateKey, data, err = s.api.ResourceLookup(r.Context(), name, 0, 0) + case 2: + version, err = strconv.ParseUint(params[1], 10, 32) + if err != nil { + break + } + period, err = strconv.ParseUint(params[0], 10, 32) + if err != nil { + break + } + updateKey, data, err = s.api.ResourceLookup(r.Context(), name, uint32(period), uint32(version)) + case 1: + period, err = strconv.ParseUint(params[0], 10, 32) + if err != nil { + break + } + updateKey, data, err = s.api.ResourceLookup(r.Context(), name, uint32(period), uint32(version)) + default: + s.BadRequest(w, r, "Invalid mutable resource request") + return + } + if err != nil { + s.Error(w, r, fmt.Errorf("Mutable resource lookup failed: %v", err)) + return + } + log.Debug("Found update", "key", updateKey) + w.Header().Set("Content-Type", "application/octet-stream") + http.ServeContent(w, &r.Request, "", now, bytes.NewReader(data)) +} + // HandleGet handles a GET request to // - bzz-raw:// and responds with the raw content stored at the // given storage key @@ -335,7 +425,7 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { return api.SkipManifest }) if entry == nil { - s.NotFound(w, r, fmt.Errorf("Manifest entry could not be loaded")) + s.NotFound(w, r, errors.New("Manifest entry could not be loaded")) return } key = storage.Key(common.Hex2Bytes(entry.Hash)) @@ -357,7 +447,6 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { contentType = typ } w.Header().Set("Content-Type", contentType) - http.ServeContent(w, &r.Request, "", time.Now(), reader) case r.uri.Hash(): w.Header().Set("Content-Type", "text/plain") @@ -604,6 +693,8 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { case "POST": if uri.Raw() || uri.DeprecatedRaw() { s.HandlePostRaw(w, req) + } else if uri.Resource() { + s.HandlePostResource(w, req) } else { s.HandlePostFiles(w, req) } @@ -629,6 +720,12 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.HandleDelete(w, req) case "GET": + + if uri.Resource() { + s.HandleGetResource(w, req) + return + } + if uri.Raw() || uri.Hash() || uri.DeprecatedRaw() { s.HandleGet(w, req) return 
diff --git a/swarm/api/http/server_test.go b/swarm/api/http/server_test.go index 305d5cf7db68..9a80405ffc20 100644 --- a/swarm/api/http/server_test.go +++ b/swarm/api/http/server_test.go @@ -18,6 +18,7 @@ package http_test import ( "bytes" + "crypto/rand" "errors" "fmt" "io/ioutil" @@ -33,6 +34,128 @@ import ( "github.com/ethereum/go-ethereum/swarm/testutil" ) +func TestBzzResource(t *testing.T) { + srv := testutil.NewTestSwarmServer(t) + defer srv.Close() + + // our mutable resource "name" + keybytes := make([]byte, common.HashLength) + copy(keybytes, []byte{42}) + srv.Hasher.Reset() + srv.Hasher.Write([]byte(fmt.Sprintf("%x", keybytes))) + keybyteshash := fmt.Sprintf("%x", srv.Hasher.Sum(nil)) + + // data of update 1 + databytes := make([]byte, 666) + _, err := rand.Read(databytes) + if err != nil { + t.Fatal(err) + } + + // creates resource and sets update 1 + url := fmt.Sprintf("%s/bzz-resource:/%x/13", srv.URL, keybytes) + resp, err := http.Post(url, "application/octet-stream", bytes.NewReader(databytes)) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + t.Fatalf("err %s", resp.Status) + } + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(b, []byte(keybyteshash)) { + t.Fatalf("resource update hash mismatch, expected '%s' got '%s'", keybyteshash, b) + } + t.Logf("creatreturn %v / %v", keybyteshash, b) + + // get latest update (1.1) through resource directly + url = fmt.Sprintf("%s/bzz-resource:/%x", srv.URL, keybytes) + resp, err = http.Get(url) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + t.Fatalf("err %s", resp.Status) + } + b, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(databytes, b) { + t.Fatalf("Expected body '%x', got '%x'", databytes, b) + } + + // update 2 + url = fmt.Sprintf("%s/bzz-resource:/%x", srv.URL, keybytes) + data := []byte("foo") + resp, err = http.Post(url, "application/octet-stream", bytes.NewReader(data)) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + t.Fatalf("Update returned %s", resp.Status) + } + + // get latest update (1.2) through resource directly + url = fmt.Sprintf("%s/bzz-resource:/%x", srv.URL, keybytes) + resp, err = http.Get(url) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + t.Fatalf("err %s", resp.Status) + } + b, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(data, b) { + t.Fatalf("Expected body '%x', got '%x'", data, b) + } + + // get latest update (1.2) with specified period + url = fmt.Sprintf("%s/bzz-resource:/%x/1", srv.URL, keybytes) + resp, err = http.Get(url) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + t.Fatalf("err %s", resp.Status) + } + b, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(data, b) { + t.Fatalf("Expected body '%x', got '%x'", data, b) + } + + // get first update (1.1) with specified period and version + url = fmt.Sprintf("%s/bzz-resource:/%x/1/1", srv.URL, keybytes) + resp, err = http.Get(url) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + t.Fatalf("err %s", resp.Status) + } + b, err = ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(databytes, b) { + t.Fatalf("Expected body '%x', got 
'%x'", databytes, b) + } +} + func TestBzzGetPath(t *testing.T) { var err error @@ -258,7 +381,6 @@ func TestBzzGetPath(t *testing.T) { t.Fatalf("Non-Hash response body does not match, expected: %v, got: %v", nonhashresponses[i], string(respbody)) } } - } // TestBzzRootRedirect tests that getting the root path of a manifest without diff --git a/swarm/api/manifest.go b/swarm/api/manifest.go index 685a300fca5b..fde086b7ac5e 100644 --- a/swarm/api/manifest.go +++ b/swarm/api/manifest.go @@ -33,7 +33,8 @@ import ( ) const ( - ManifestType = "application/bzz-manifest+json" + ManifestType = "application/bzz-manifest+json" + ResourceContentType = "application/bzz-resource" ) // Manifest represents a swarm manifest diff --git a/swarm/api/uri.go b/swarm/api/uri.go index d8aafedf4138..009bc016fd58 100644 --- a/swarm/api/uri.go +++ b/swarm/api/uri.go @@ -69,7 +69,7 @@ func Parse(rawuri string) (*URI, error) { // check the scheme is valid switch uri.Scheme { - case "bzz", "bzz-raw", "bzz-immutable", "bzz-list", "bzz-hash", "bzzr", "bzzi": + case "bzz", "bzz-raw", "bzz-immutable", "bzz-list", "bzz-hash", "bzzr", "bzzi", "bzz-resource": default: return nil, fmt.Errorf("unknown scheme %q", u.Scheme) } @@ -92,6 +92,10 @@ func Parse(rawuri string) (*URI, error) { return uri, nil } +func (u *URI) Resource() bool { + return u.Scheme == "bzz-resource" +} + func (u *URI) Raw() bool { return u.Scheme == "bzz-raw" } diff --git a/swarm/fuse/swarmfs_test.go b/swarm/fuse/swarmfs_test.go index 42af36345f6f..f0d64c3538a3 100644 --- a/swarm/fuse/swarmfs_test.go +++ b/swarm/fuse/swarmfs_test.go @@ -812,7 +812,7 @@ func TestFUSE(t *testing.T) { if err != nil { t.Fatal(err) } - ta := &testAPI{api: api.NewApi(dpa, nil)} + ta := &testAPI{api: api.NewApi(dpa, nil, nil)} dpa.Start() defer dpa.Stop() diff --git a/swarm/storage/resource.go b/swarm/storage/resource.go index 3d6c65371666..03235fa65a66 100644 --- a/swarm/storage/resource.go +++ b/swarm/storage/resource.go @@ -1,27 +1,30 @@ package storage import ( + "context" "encoding/binary" + "errors" "fmt" + "math/big" "path/filepath" - "strconv" "sync" "time" "golang.org/x/net/idna" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rpc" ) const ( signatureLength = 65 indexSize = 16 - dbDirName = "resource" + DbDirName = "resource" chunkSize = 4096 // temporary until we implement DPA in the resourcehandler defaultStoreTimeout = 4000 * time.Millisecond + hasherCount = 8 ) type Signature [signatureLength]byte @@ -37,6 +40,7 @@ type resource struct { nameHash common.Hash startBlock uint64 lastPeriod uint32 + lastKey Key frequency uint64 version uint32 data []byte @@ -44,18 +48,27 @@ type resource struct { } // TODO Expire content after a defined period (to force resync) -func (r *resource) isSynced() bool { - return !r.updated.IsZero() +func (self *resource) isSynced() bool { + return !self.updated.IsZero() +} + +func (self *resource) NameHash() common.Hash { + return self.nameHash } // Implement to activate validation of resource updates // Specifically signing data and verification of signatures type ResourceValidator interface { + hashSize() int checkAccess(string, common.Address) (bool, error) nameHash(string) common.Hash // nameHashFunc sign(common.Hash) (Signature, error) // SignFunc } +type ethApi interface { + HeaderByNumber(context.Context, *big.Int) (*types.Header, error) +} + // Mutable resource is an entity which 
allows updates to a resource // without resorting to ENS on each update. // The update scheme is built on swarm chunks with chunk keys following @@ -113,14 +126,15 @@ type ResourceValidator interface { // stored using a separate store, and forwarding/syncing protocols carry per-chunk // flags to tell whether the chunk can be validated or not; if not it is to be // treated as a resource update chunk. +// +// TODO: Include modtime in chunk data + signature type ResourceHandler struct { ChunkStore validator ResourceValidator - rpcClient *rpc.Client + ethClient ethApi resources map[string]*resource - hashLock sync.Mutex + hashPool sync.Pool resourceLock sync.RWMutex - hasher SwarmHash nameHash nameHashFunc storeTimeout time.Duration } @@ -128,11 +142,11 @@ type ResourceHandler struct { // Create or open resource update chunk store // // If validator is nil, signature and access validation will be deactivated -func NewResourceHandler(datadir string, cloudStore CloudStore, rpcClient *rpc.Client, validator ResourceValidator) (*ResourceHandler, error) { +func NewResourceHandler(datadir string, cloudStore CloudStore, ethClient ethApi, validator ResourceValidator) (*ResourceHandler, error) { hashfunc := MakeHashFunc(SHA3Hash) - path := filepath.Join(datadir, dbDirName) + path := filepath.Join(datadir, DbDirName) dbStore, err := NewDbStore(datadir, hashfunc, singletonSwarmDbCapacity, 0) if err != nil { return nil, err @@ -144,28 +158,74 @@ func NewResourceHandler(datadir string, cloudStore CloudStore, rpcClient *rpc.Cl rh := &ResourceHandler{ ChunkStore: newResourceChunkStore(path, hashfunc, localStore, cloudStore), - rpcClient: rpcClient, + ethClient: ethClient, resources: make(map[string]*resource), - hasher: hashfunc(), validator: validator, storeTimeout: defaultStoreTimeout, + hashPool: sync.Pool{ + New: func() interface{} { + return MakeHashFunc(SHA3Hash)() + }, + }, } if rh.validator != nil { rh.nameHash = rh.validator.nameHash } else { rh.nameHash = func(name string) common.Hash { - rh.hashLock.Lock() - defer rh.hashLock.Unlock() - rh.hasher.Reset() - rh.hasher.Write([]byte(name)) - return common.BytesToHash(rh.hasher.Sum(nil)) + hasher := rh.hashPool.Get().(SwarmHash) + defer rh.hashPool.Put(hasher) + hasher.Reset() + hasher.Write([]byte(name)) + hashval := common.BytesToHash(hasher.Sum(nil)) + log.Debug("generic namehasher", "name", name, "hash", hashval) + return hashval } } + for i := 0; i < hasherCount; i++ { + hashfunc := MakeHashFunc(SHA3Hash)() + rh.hashPool.Put(hashfunc) + } + return rh, nil } +func (self *ResourceHandler) IsValidated() bool { + return self.validator == nil +} + +func (self *ResourceHandler) HashSize() int { + return self.validator.hashSize() +} + +// get data from current resource + +func (self *ResourceHandler) GetContent(name string) (Key, []byte, error) { + rsrc := self.getResource(name) + if rsrc == nil || !rsrc.isSynced() { + return nil, nil, errors.New("Resource does not exist or is not synced") + } + return rsrc.lastKey, rsrc.data, nil +} + +func (self *ResourceHandler) GetLastPeriod(name string) (uint32, error) { + rsrc := self.getResource(name) + + if rsrc == nil || !rsrc.isSynced() { + return 0, errors.New("Resource does not exist or is not synced") + } + return rsrc.lastPeriod, nil +} + +func (self *ResourceHandler) GetVersion(name string) (uint32, error) { + rsrc := self.getResource(name) + if rsrc == nil || !rsrc.isSynced() { + return 0, errors.New("Resource does not exist or is not synced") + } + return rsrc.version, nil +} + // \TODO should be hashsize 
* branches from the chosen chunker, implement with dpa func (self *ResourceHandler) chunkSize() int64 { return chunkSize @@ -176,11 +236,11 @@ func (self *ResourceHandler) chunkSize() int64 { // The signature data should match the hash of the idna-converted name by the validator's namehash function, NOT the raw name bytes. // // The start block of the resource update will be the actual current block height of the connected network. -func (self *ResourceHandler) NewResource(name string, frequency uint64) (*resource, error) { +func (self *ResourceHandler) NewResource(ctx context.Context, name string, frequency uint64) (*resource, error) { // frequency 0 is invalid if frequency == 0 { - return nil, fmt.Errorf("Frequency cannot be 0") + return nil, errors.New("Frequency cannot be 0") } if !isSafeName(name) { @@ -207,7 +267,7 @@ func (self *ResourceHandler) NewResource(name string, frequency uint64) (*resour } // get our blockheight at this time - currentblock, err := self.getBlock() + currentblock, err := self.GetBlock(ctx) if err != nil { return nil, err } @@ -245,8 +305,14 @@ func (self *ResourceHandler) NewResource(name string, frequency uint64) (*resour // root chunk. // It is the callers responsibility to make sure that this chunk exists (if the resource // update root data was retrieved externally, it typically doesn't) -func (self *ResourceHandler) LookupVersion(name string, period uint32, version uint32, refresh bool) (*resource, error) { - rsrc, err := self.loadResource(name, refresh) +// +// +func (self *ResourceHandler) LookupVersionByName(ctx context.Context, name string, period uint32, version uint32, refresh bool) (*resource, error) { + return self.LookupVersion(ctx, self.nameHash(name), name, period, version, refresh) +} + +func (self *ResourceHandler) LookupVersion(ctx context.Context, nameHash common.Hash, name string, period uint32, version uint32, refresh bool) (*resource, error) { + rsrc, err := self.loadResource(nameHash, name, refresh) if err != nil { return nil, err } @@ -261,8 +327,12 @@ func (self *ResourceHandler) LookupVersion(name string, period uint32, version u // and returned. 
// // See also (*ResourceHandler).LookupVersion -func (self *ResourceHandler) LookupHistorical(name string, period uint32, refresh bool) (*resource, error) { - rsrc, err := self.loadResource(name, refresh) +func (self *ResourceHandler) LookupHistoricalByName(ctx context.Context, name string, period uint32, refresh bool) (*resource, error) { + return self.LookupHistorical(ctx, self.nameHash(name), name, period, refresh) +} + +func (self *ResourceHandler) LookupHistorical(ctx context.Context, nameHash common.Hash, name string, period uint32, refresh bool) (*resource, error) { + rsrc, err := self.loadResource(nameHash, name, refresh) if err != nil { return nil, err } @@ -279,14 +349,18 @@ func (self *ResourceHandler) LookupHistorical(name string, period uint32, refres // Version iteration is done as in (*ResourceHandler).LookupHistorical // // See also (*ResourceHandler).LookupHistorical -func (self *ResourceHandler) LookupLatest(name string, refresh bool) (*resource, error) { +func (self *ResourceHandler) LookupLatestByName(ctx context.Context, name string, refresh bool) (*resource, error) { + return self.LookupLatest(ctx, self.nameHash(name), name, refresh) +} + +func (self *ResourceHandler) LookupLatest(ctx context.Context, nameHash common.Hash, name string, refresh bool) (*resource, error) { // get our blockheight at this time and the next block of the update period - rsrc, err := self.loadResource(name, refresh) + rsrc, err := self.loadResource(nameHash, name, refresh) if err != nil { return nil, err } - currentblock, err := self.getBlock() + currentblock, err := self.GetBlock(ctx) if err != nil { return nil, err } @@ -298,7 +372,7 @@ func (self *ResourceHandler) LookupLatest(name string, refresh bool) (*resource, func (self *ResourceHandler) lookup(rsrc *resource, period uint32, version uint32, refresh bool) (*resource, error) { if period == 0 { - return nil, fmt.Errorf("period must be >0") + return nil, errors.New("period must be >0") } // start from the last possible block period, and iterate previous ones until we find a match @@ -334,11 +408,15 @@ func (self *ResourceHandler) lookup(rsrc *resource, period uint32, version uint3 log.Trace("rsrc update not found, checking previous period", "period", period, "key", key) period-- } - return nil, fmt.Errorf("no updates found") + return nil, errors.New("no updates found") } // load existing mutable resource into resource struct -func (self *ResourceHandler) loadResource(name string, refresh bool) (*resource, error) { +func (self *ResourceHandler) loadResource(nameHash common.Hash, name string, refresh bool) (*resource, error) { + + if name == "" { + name = nameHash.Hex() + } // if the resource is not known to this session we must load it // if refresh is set, we force load @@ -350,7 +428,7 @@ func (self *ResourceHandler) loadResource(name string, refresh bool) (*resource, return nil, fmt.Errorf("Invalid name '%s'", name) } rsrc.name = &name - rsrc.nameHash = self.nameHash(name) + rsrc.nameHash = nameHash // get the root info chunk and update the cached value chunk, err := self.Get(Key(rsrc.nameHash[:])) @@ -381,6 +459,7 @@ func (self *ResourceHandler) updateResourceIndex(rsrc *resource, chunk *Chunk) ( if *rsrc.name != name { return nil, fmt.Errorf("Update belongs to '%s', but have '%s'", name, *rsrc.name) } + log.Trace("update", "name", *rsrc.name, "rootkey", rsrc.nameHash, "updatekey", chunk.Key, "period", period, "version", version) // only check signature if validator is present if self.validator != nil { digest := 
self.keyDataHash(chunk.Key, data) @@ -391,6 +470,7 @@ func (self *ResourceHandler) updateResourceIndex(rsrc *resource, chunk *Chunk) ( } // update our rsrcs entry map + rsrc.lastKey = chunk.Key rsrc.lastPeriod = period rsrc.version = version rsrc.updated = time.Now() @@ -406,19 +486,11 @@ func (self *ResourceHandler) updateResourceIndex(rsrc *resource, chunk *Chunk) ( func (self *ResourceHandler) parseUpdate(chunkdata []byte) (*Signature, uint32, uint32, string, []byte, error) { var err error cursor := 0 - var signature *Signature - // omit signatures if we have no validator - var sigoffset int - if self.validator != nil { - signature = &Signature{} - copy(signature[:], chunkdata[:signatureLength]) - sigoffset = signatureLength - cursor = sigoffset - } - headerlength := binary.LittleEndian.Uint16(chunkdata[cursor : cursor+2]) - if int(headerlength+2) > len(chunkdata) { - err = fmt.Errorf("Reported header length %d longer than actual data length %d", headerlength, len(chunkdata)) + cursor += 2 + datalength := binary.LittleEndian.Uint16(chunkdata[cursor : cursor+2]) + if int(headerlength+datalength+4) > len(chunkdata) { + err = fmt.Errorf("Reported headerlength %d + datalength %d longer than actual chunk data length %d", headerlength, datalength, len(chunkdata)) return nil, 0, 0, "", nil, err } @@ -431,12 +503,22 @@ func (self *ResourceHandler) parseUpdate(chunkdata []byte) (*Signature, uint32, cursor += 4 version = binary.LittleEndian.Uint32(chunkdata[cursor : cursor+4]) cursor += 4 - namelength := int(headerlength) - cursor + sigoffset + 2 + namelength := int(headerlength) - cursor + 4 name = string(chunkdata[cursor : cursor+namelength]) cursor += namelength - data = make([]byte, len(chunkdata)-cursor) - copy(data, chunkdata[cursor:]) - return signature, period, version, name, data, err + intdatalength := int(datalength) + data = make([]byte, intdatalength) + copy(data, chunkdata[cursor:cursor+intdatalength]) + + // omit signatures if we have no validator + var signature *Signature + if self.validator != nil { + cursor += intdatalength + signature = &Signature{} + copy(signature[:], chunkdata[cursor:cursor+signatureLength]) + } + + return signature, period, version, name, data, nil } // Adds an actual data update @@ -445,30 +527,30 @@ func (self *ResourceHandler) parseUpdate(chunkdata []byte) (*Signature, uint32, // It is the caller's responsibility to make sure that this data is not stale. 
// // A resource update cannot span chunks, and thus has max length 4096 -func (self *ResourceHandler) Update(name string, data []byte) (Key, error) { +func (self *ResourceHandler) Update(ctx context.Context, name string, data []byte) (Key, error) { - var sigoffset int + var signaturelength int if self.validator != nil { - sigoffset = signatureLength + signaturelength = signatureLength } // get the cached information rsrc := self.getResource(name) if rsrc == nil { - return nil, fmt.Errorf("Resource object not in index") + return nil, errors.New("Resource object not in index") } if !rsrc.isSynced() { - return nil, fmt.Errorf("Resource object not in sync") + return nil, errors.New("Resource object not in sync") } // an update can be only one chunk long - datalimit := self.chunkSize() - int64(sigoffset-len(name)-8) + datalimit := self.chunkSize() - int64(signaturelength-len(name)-4-4-2-2) if int64(len(data)) > datalimit { return nil, fmt.Errorf("Data overflow: %d / %d bytes", len(data), datalimit) } // get our blockheight at this time and the next block of the update period - currentblock, err := self.getBlock() + currentblock, err := self.GetBlock(ctx) if err != nil { return nil, err } @@ -536,17 +618,12 @@ func (self *ResourceHandler) Close() { self.ChunkStore.Close() } -func (self *ResourceHandler) getBlock() (uint64, error) { - // get the block height and convert to uint64 - var currentblock string - err := self.rpcClient.Call(¤tblock, "eth_blockNumber") +func (self *ResourceHandler) GetBlock(ctx context.Context) (uint64, error) { + blockheader, err := self.ethClient.HeaderByNumber(ctx, nil) if err != nil { return 0, err } - if currentblock == "0x0" { - return 0, nil - } - return strconv.ParseUint(currentblock, 10, 64) + return blockheader.Number.Uint64(), nil } // Calculate the period index (aka major version number) from a given block number @@ -575,23 +652,20 @@ func (self *ResourceHandler) setResource(name string, rsrc *resource) { // used for chunk keys func (self *ResourceHandler) resourceHash(period uint32, version uint32, namehash common.Hash) Key { // format is: hash(period|version|namehash) - self.hashLock.Lock() - defer self.hashLock.Unlock() - self.hasher.Reset() + hasher := self.hashPool.Get().(SwarmHash) + defer self.hashPool.Put(hasher) + hasher.Reset() b := make([]byte, 4) binary.LittleEndian.PutUint32(b, period) - self.hasher.Write(b) + hasher.Write(b) binary.LittleEndian.PutUint32(b, version) - self.hasher.Write(b) - self.hasher.Write(namehash[:]) - return self.hasher.Sum(nil) + hasher.Write(b) + hasher.Write(namehash[:]) + return hasher.Sum(nil) } func (self *ResourceHandler) hasUpdate(name string, period uint32) bool { - if self.resources[name].lastPeriod == period { - return true - } - return false + return self.resources[name].lastPeriod == period } func getAddressFromDataSig(datahash common.Hash, signature Signature) (common.Address, error) { @@ -606,27 +680,30 @@ func getAddressFromDataSig(datahash common.Hash, signature Signature) (common.Ad func newUpdateChunk(key Key, signature *Signature, period uint32, version uint32, name string, data []byte) *Chunk { // no signatures if no validator - var sigoffset int + var signaturelength int if signature != nil { - sigoffset = signatureLength + signaturelength = signatureLength } // prepend version and period to allow reverse lookups - headerlength := uint16(len(name) + 4 + 4) + headerlength := len(name) + 4 + 4 + + // also prepend datalength + datalength := len(data) chunk := NewChunk(key, nil) - chunk.SData = 
make([]byte, sigoffset+int(headerlength)+2+len(data)) + chunk.SData = make([]byte, 4+signaturelength+headerlength+datalength) // initial 4 are uint16 length descriptors for headerlength and datalength + // data header length does NOT include the header length prefix bytes themselves cursor := 0 - if signature != nil { - copy(chunk.SData, (*signature)[:]) - cursor += signatureLength - } + binary.LittleEndian.PutUint16(chunk.SData[cursor:], uint16(headerlength)) + cursor += 2 - // data header length does NOT include the header length prefix bytes themselves - binary.LittleEndian.PutUint16(chunk.SData[cursor:], headerlength) + // data length + binary.LittleEndian.PutUint16(chunk.SData[cursor:], uint16(datalength)) cursor += 2 + // header = period + version + name binary.LittleEndian.PutUint32(chunk.SData[cursor:], period) cursor += 4 @@ -637,8 +714,15 @@ func newUpdateChunk(key Key, signature *Signature, period uint32, version uint32 copy(chunk.SData[cursor:], namebytes) cursor += len(namebytes) + // add the data copy(chunk.SData[cursor:], data) + // if signature is present it's the last item in the chunk data + if signature != nil { + cursor += datalength + copy(chunk.SData[cursor:], signature[:]) + } + chunk.Size = int64(len(chunk.SData)) return chunk } @@ -671,15 +755,17 @@ func (r *resourceChunkStore) Get(key Key) (*Chunk, error) { t := time.NewTimer(time.Second * 1) select { case <-t.C: - return nil, fmt.Errorf("timeout") - case <-chunk.C: + return nil, errors.New("timeout") + case <-chunk.Req.C: log.Trace("Received resource update chunk", "peer", chunk.Req.Source) } return chunk, nil } func (r *resourceChunkStore) Put(chunk *Chunk) { + chunk.wg = &sync.WaitGroup{} r.netStore.Put(chunk) + chunk.wg.Wait() } func (r *resourceChunkStore) Close() { @@ -694,11 +780,7 @@ func getNextPeriod(start uint64, current uint64, frequency uint64) uint32 { } func ToSafeName(name string) (string, error) { - validname, err := idna.ToASCII(name) - if err != nil { - return "", err - } - return validname, nil + return idna.ToASCII(name) } // check that name identifiers contain valid bytes @@ -710,18 +792,15 @@ func isSafeName(name string) bool { if err != nil { return false } - if validname != name { - return false - } - return true + return validname == name } // convenience for creating signature hashes of update data func (self *ResourceHandler) keyDataHash(key Key, data []byte) common.Hash { - self.hashLock.Lock() - defer self.hashLock.Unlock() - self.hasher.Reset() - self.hasher.Write(key[:]) - self.hasher.Write(data) - return common.BytesToHash(self.hasher.Sum(nil)) + hasher := self.hashPool.Get().(SwarmHash) + defer self.hashPool.Put(hasher) + hasher.Reset() + hasher.Write(key[:]) + hasher.Write(data) + return common.BytesToHash(hasher.Sum(nil)) } diff --git a/swarm/storage/resource_ens.go b/swarm/storage/resource_ens.go index 0a4500309d4f..33163bc97aa5 100644 --- a/swarm/storage/resource_ens.go +++ b/swarm/storage/resource_ens.go @@ -1,7 +1,7 @@ package storage import ( - "fmt" + "errors" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -10,15 +10,20 @@ import ( type baseValidator struct { signFunc SignFunc + hashsize int } func (b *baseValidator) sign(datahash common.Hash) (signature Signature, err error) { if b.signFunc == nil { - return signature, fmt.Errorf("No signature function") + return signature, errors.New("No signature function") } return b.signFunc(datahash) } +func (b *baseValidator) hashSize() int { + return b.hashsize +} + // ENS validation 
of mutable resource owners type ENSValidator struct { *baseValidator @@ -30,6 +35,7 @@ func NewENSValidator(contractaddress common.Address, backend bind.ContractBacken validator := &ENSValidator{ baseValidator: &baseValidator{ signFunc: signFunc, + hashsize: common.HashLength, }, } validator.api, err = ens.NewENS(transactOpts, contractaddress, backend) diff --git a/swarm/storage/resource_sign.go b/swarm/storage/resource_sign.go new file mode 100644 index 000000000000..840e705ac115 --- /dev/null +++ b/swarm/storage/resource_sign.go @@ -0,0 +1,20 @@ +package storage + +import ( + "crypto/ecdsa" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +// matches the SignFunc type +func NewGenericResourceSigner(privKey *ecdsa.PrivateKey) SignFunc { + return func(data common.Hash) (signature Signature, err error) { + signaturebytes, err := crypto.Sign(data.Bytes(), privKey) + if err != nil { + return + } + copy(signature[:], signaturebytes) + return + } +} diff --git a/swarm/storage/resource_test.go b/swarm/storage/resource_test.go index d13070942830..376b8397aed1 100644 --- a/swarm/storage/resource_test.go +++ b/swarm/storage/resource_test.go @@ -2,15 +2,15 @@ package storage import ( "bytes" + "context" "crypto/ecdsa" "crypto/rand" "encoding/binary" + "flag" "fmt" "io/ioutil" "math/big" "os" - "path/filepath" - "strconv" "strings" "testing" "time" @@ -21,9 +21,9 @@ import ( "github.com/ethereum/go-ethereum/contracts/ens" "github.com/ethereum/go-ethereum/contracts/ens/contract" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rpc" ) var ( @@ -38,7 +38,11 @@ var ( func init() { var err error - log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))) + verbose := flag.Bool("v", false, "verbose") + flag.Parse() + if *verbose { + log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))) + } safeName, err = ToSafeName(domainName) if err != nil { panic(err) @@ -49,7 +53,7 @@ func init() { // so we use this wrapper to fake returning the block count type fakeBackend struct { *backends.SimulatedBackend - blocknumber uint64 + blocknumber int64 } func (f *fakeBackend) Commit() { @@ -59,13 +63,12 @@ func (f *fakeBackend) Commit() { f.blocknumber++ } -// for faking the rpc service, since we don't need the whole node stack -type FakeRPC struct { - backend *fakeBackend -} - -func (r *FakeRPC) BlockNumber() (string, error) { - return strconv.FormatUint(r.backend.blocknumber, 10), nil +func (f *fakeBackend) HeaderByNumber(context context.Context, bigblock *big.Int) (*types.Header, error) { + f.blocknumber++ + biggie := big.NewInt(f.blocknumber) + return &types.Header{ + Number: biggie, + }, nil } // check that signature address matches update signer address @@ -83,8 +86,9 @@ func TestResourceReverse(t *testing.T) { // set up rpc and create resourcehandler rh, _, _, teardownTest, err := setupTest(nil, newTestValidator(signer.signContent)) if err != nil { - teardownTest(t, err) + t.Fatal(err) } + defer teardownTest() // generate a hash for block 4200 version 1 key := rh.resourceHash(period, version, rh.nameHash(safeName)) @@ -93,48 +97,50 @@ func TestResourceReverse(t *testing.T) { data := make([]byte, 8) _, err = rand.Read(data) if err != nil { - teardownTest(t, err) + 
t.Fatal(err) } testHasher.Reset() testHasher.Write(data) digest := rh.keyDataHash(key, data) sig, err := rh.validator.sign(digest) if err != nil { - teardownTest(t, err) + t.Fatal(err) } chunk := newUpdateChunk(key, &sig, period, version, safeName, data) // check that we can recover the owner account from the update chunk's signature checksig, checkperiod, checkversion, checkname, checkdata, err := rh.parseUpdate(chunk.SData) + if err != nil { + t.Fatal(err) + } checkdigest := rh.keyDataHash(chunk.Key, checkdata) recoveredaddress, err := getAddressFromDataSig(checkdigest, *checksig) if err != nil { - teardownTest(t, fmt.Errorf("Retrieve address from signature fail: %v", err)) + t.Fatalf("Retrieve address from signature fail: %v", err) } originaladdress := crypto.PubkeyToAddress(signer.privKey.PublicKey) // check that the metadata retrieved from the chunk matches what we gave it if recoveredaddress != originaladdress { - teardownTest(t, fmt.Errorf("addresses dont match: %x != %x", originaladdress, recoveredaddress)) + t.Fatalf("addresses dont match: %x != %x", originaladdress, recoveredaddress) } if !bytes.Equal(key[:], chunk.Key[:]) { - teardownTest(t, fmt.Errorf("Expected chunk key '%x', was '%x'", key, chunk.Key)) + t.Fatalf("Expected chunk key '%x', was '%x'", key, chunk.Key) } if period != checkperiod { - teardownTest(t, fmt.Errorf("Expected period '%d', was '%d'", period, checkperiod)) + t.Fatalf("Expected period '%d', was '%d'", period, checkperiod) } if version != checkversion { - teardownTest(t, fmt.Errorf("Expected version '%d', was '%d'", version, checkversion)) + t.Fatalf("Expected version '%d', was '%d'", version, checkversion) } if safeName != checkname { - teardownTest(t, fmt.Errorf("Expected name '%s', was '%s'", safeName, checkname)) + t.Fatalf("Expected name '%s', was '%s'", safeName, checkname) } if !bytes.Equal(data, checkdata) { - teardownTest(t, fmt.Errorf("Expectedn data '%x', was '%x'", data, checkdata)) + t.Fatalf("Expectedn data '%x', was '%x'", data, checkdata) } - teardownTest(t, nil) } // make updates and retrieve them based on periods and versions @@ -142,67 +148,70 @@ func TestResourceHandler(t *testing.T) { // make fake backend, set up rpc and create resourcehandler backend := &fakeBackend{ - blocknumber: startBlock, + blocknumber: int64(startBlock), } rh, datadir, _, teardownTest, err := setupTest(backend, nil) if err != nil { - teardownTest(t, err) + t.Fatal(err) } + defer teardownTest() // create a new resource - _, err = rh.NewResource(safeName, resourceFrequency) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, err = rh.NewResource(ctx, safeName, resourceFrequency) if err != nil { - teardownTest(t, err) + t.Fatal(err) } // check that the new resource is stored correctly namehash := rh.nameHash(safeName) chunk, err := rh.ChunkStore.(*resourceChunkStore).localStore.(*LocalStore).memStore.Get(Key(namehash[:])) if err != nil { - teardownTest(t, err) + t.Fatal(err) } else if len(chunk.SData) < 16 { - teardownTest(t, fmt.Errorf("chunk data must be minimum 16 bytes, is %d", len(chunk.SData))) + t.Fatalf("chunk data must be minimum 16 bytes, is %d", len(chunk.SData)) } startblocknumber := binary.LittleEndian.Uint64(chunk.SData[:8]) chunkfrequency := binary.LittleEndian.Uint64(chunk.SData[8:]) - if startblocknumber != backend.blocknumber { - teardownTest(t, fmt.Errorf("stored block number %d does not match provided block number %d", startblocknumber, backend.blocknumber)) + if startblocknumber != uint64(backend.blocknumber) { + 
t.Fatalf("stored block number %d does not match provided block number %d", startblocknumber, backend.blocknumber) } if chunkfrequency != resourceFrequency { - teardownTest(t, fmt.Errorf("stored frequency %d does not match provided frequency %d", chunkfrequency, resourceFrequency)) + t.Fatalf("stored frequency %d does not match provided frequency %d", chunkfrequency, resourceFrequency) } // update halfway to first period resourcekey := make(map[string]Key) fwdBlocks(int(resourceFrequency/2), backend) data := []byte("blinky") - resourcekey["blinky"], err = rh.Update(safeName, data) + resourcekey["blinky"], err = rh.Update(ctx, safeName, data) if err != nil { - teardownTest(t, err) + t.Fatal(err) } // update on first period fwdBlocks(int(resourceFrequency/2), backend) data = []byte("pinky") - resourcekey["pinky"], err = rh.Update(safeName, data) + resourcekey["pinky"], err = rh.Update(ctx, safeName, data) if err != nil { - teardownTest(t, err) + t.Fatal(err) } // update on second period fwdBlocks(int(resourceFrequency), backend) data = []byte("inky") - resourcekey["inky"], err = rh.Update(safeName, data) + resourcekey["inky"], err = rh.Update(ctx, safeName, data) if err != nil { - teardownTest(t, err) + t.Fatal(err) } // update just after second period fwdBlocks(1, backend) data = []byte("clyde") - resourcekey["clyde"], err = rh.Update(safeName, data) + resourcekey["clyde"], err = rh.Update(ctx, safeName, data) if err != nil { - teardownTest(t, err) + t.Fatal(err) } time.Sleep(time.Second) rh.Close() @@ -211,46 +220,45 @@ func TestResourceHandler(t *testing.T) { // it will match on second iteration startblocknumber + (resourceFrequency * 3) fwdBlocks(int(resourceFrequency*2)-1, backend) - rh2, err := NewResourceHandler(datadir, &testCloudStore{}, rh.rpcClient, nil) - _, err = rh2.LookupLatest(safeName, true) + rh2, err := NewResourceHandler(datadir, &testCloudStore{}, rh.ethClient, nil) + _, err = rh2.LookupLatestByName(ctx, safeName, true) if err != nil { - teardownTest(t, err) + t.Fatal(err) } // last update should be "clyde", version two, blockheight startblocknumber + (resourcefrequency * 3) if !bytes.Equal(rh2.resources[safeName].data, []byte("clyde")) { - teardownTest(t, fmt.Errorf("resource data was %v, expected %v", rh2.resources[safeName].data, []byte("clyde"))) + t.Fatalf("resource data was %v, expected %v", rh2.resources[safeName].data, []byte("clyde")) } if rh2.resources[safeName].version != 2 { - teardownTest(t, fmt.Errorf("resource version was %d, expected 2", rh2.resources[safeName].version)) + t.Fatalf("resource version was %d, expected 2", rh2.resources[safeName].version) } if rh2.resources[safeName].lastPeriod != 3 { - teardownTest(t, fmt.Errorf("resource period was %d, expected 3", rh2.resources[safeName].lastPeriod)) + t.Fatalf("resource period was %d, expected 3", rh2.resources[safeName].lastPeriod) } log.Debug("Latest lookup", "period", rh2.resources[safeName].lastPeriod, "version", rh2.resources[safeName].version, "data", rh2.resources[safeName].data) // specific block, latest version - rsrc, err := rh2.LookupHistorical(safeName, 3, true) + rsrc, err := rh2.LookupHistoricalByName(ctx, safeName, 3, true) if err != nil { - teardownTest(t, err) + t.Fatal(err) } // check data if !bytes.Equal(rsrc.data, []byte("clyde")) { - teardownTest(t, fmt.Errorf("resource data (historical) was %v, expected %v", rh2.resources[domainName].data, []byte("clyde"))) + t.Fatalf("resource data (historical) was %v, expected %v", rh2.resources[domainName].data, []byte("clyde")) } 
log.Debug("Historical lookup", "period", rh2.resources[safeName].lastPeriod, "version", rh2.resources[safeName].version, "data", rh2.resources[safeName].data) // specific block, specific version - rsrc, err = rh2.LookupVersion(safeName, 3, 1, true) + rsrc, err = rh2.LookupVersionByName(ctx, safeName, 3, 1, true) if err != nil { - teardownTest(t, err) + t.Fatal(err) } // check data if !bytes.Equal(rsrc.data, []byte("inky")) { - teardownTest(t, fmt.Errorf("resource data (historical) was %v, expected %v", rh2.resources[domainName].data, []byte("inky"))) + t.Fatalf("resource data (historical) was %v, expected %v", rh2.resources[domainName].data, []byte("inky")) } log.Debug("Specific version lookup", "period", rh2.resources[safeName].lastPeriod, "version", rh2.resources[safeName].version, "data", rh2.resources[safeName].data) - teardownTest(t, nil) } @@ -282,34 +290,35 @@ func TestResourceENSOwner(t *testing.T) { // set up rpc and create resourcehandler with ENS sim backend rh, _, _, teardownTest, err := setupTest(contractbackend, validator) if err != nil { - teardownTest(t, err) + t.Fatal(err) } + defer teardownTest() // create new resource when we are owner = ok - _, err = rh.NewResource(safeName, resourceFrequency) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, err = rh.NewResource(ctx, safeName, resourceFrequency) if err != nil { - teardownTest(t, fmt.Errorf("Create resource fail: %v", err)) + t.Fatalf("Create resource fail: %v", err) } data := []byte("foo") // update resource when we are owner = ok - _, err = rh.Update(safeName, data) + _, err = rh.Update(ctx, safeName, data) if err != nil { - teardownTest(t, fmt.Errorf("Update resource fail: %v", err)) + t.Fatalf("Update resource fail: %v", err) } // update resource when we are owner = ok signertwo, err := newTestSigner() if err != nil { - teardownTest(t, err) + t.Fatal(err) } rh.validator.(*ENSValidator).signFunc = signertwo.signContent - _, err = rh.Update(safeName, data) + _, err = rh.Update(ctx, safeName, data) if err == nil { - teardownTest(t, fmt.Errorf("Expected resource update fail due to owner mismatch")) + t.Fatalf("Expected resource update fail due to owner mismatch") } - - teardownTest(t, nil) } // fast-forward blockheight @@ -320,7 +329,7 @@ func fwdBlocks(count int, backend *fakeBackend) { } // create rpc and resourcehandler -func setupTest(contractbackend bind.ContractBackend, validator ResourceValidator) (rh *ResourceHandler, datadir string, signer *testSigner, teardown func(*testing.T, error), err error) { +func setupTest(backend ethApi, validator ResourceValidator) (rh *ResourceHandler, datadir string, signer *testSigner, teardown func(), err error) { var fsClean func() var rpcClean func() @@ -336,53 +345,18 @@ func setupTest(contractbackend bind.ContractBackend, validator ResourceValidator // temp datadir datadir, err = ioutil.TempDir("", "rh") if err != nil { - return + return nil, "", nil, nil, err } fsClean = func() { os.RemoveAll(datadir) } - // starting the whole stack just to get blocknumbers is too cumbersome - // so we fake the rpc server to get blocknumbers for testing - ipcpath := filepath.Join(datadir, "test.ipc") - ipcl, err := rpc.CreateIPCListener(ipcpath) - if err != nil { - return - } - rpcserver := rpc.NewServer() - var fake *fakeBackend - if contractbackend != nil { - fake = contractbackend.(*fakeBackend) - } - rpcserver.RegisterName("eth", &FakeRPC{ - backend: fake, - }) - go func() { - rpcserver.ServeListener(ipcl) - }() - rpcClean = func() { - rpcserver.Stop() - } - 
- // connect to fake rpc - rpcclient, err := rpc.Dial(ipcpath) - if err != nil { - return - } - - rh, err = NewResourceHandler(datadir, &testCloudStore{}, rpcclient, validator) - teardown = func(t *testing.T, err error) { - cleanF() - if err != nil { - t.Fatal(err) - } - } - - return + rh, err = NewResourceHandler(datadir, &testCloudStore{}, backend, validator) + return rh, datadir, signer, cleanF, nil } // Set up simulated ENS backend for use with ENSResourceHandler tests -func setupENS(addr common.Address, transactOpts *bind.TransactOpts, sub string, top string) (common.Address, bind.ContractBackend, error) { +func setupENS(addr common.Address, transactOpts *bind.TransactOpts, sub string, top string) (common.Address, *fakeBackend, error) { // create the domain hash values to pass to the ENS contract methods var tophash [32]byte @@ -426,8 +400,9 @@ func setupENS(addr common.Address, transactOpts *bind.TransactOpts, sub string, // implementation of an external signer to pass to validator type testSigner struct { - privKey *ecdsa.PrivateKey - hasher SwarmHash + privKey *ecdsa.PrivateKey + hasher SwarmHash + signContent SignFunc } func newTestSigner() (*testSigner, error) { @@ -436,21 +411,12 @@ func newTestSigner() (*testSigner, error) { return nil, err } return &testSigner{ - privKey: privKey, - hasher: testHasher, + privKey: privKey, + hasher: testHasher, + signContent: NewGenericResourceSigner(privKey), }, nil } -// matches the SignFunc type -func (self *testSigner) signContent(data common.Hash) (signature Signature, err error) { - signaturebytes, err := crypto.Sign(data.Bytes(), self.privKey) - if err != nil { - return - } - copy(signature[:], signaturebytes) - return -} - type testCloudStore struct { } diff --git a/swarm/swarm.go b/swarm/swarm.go index 14826bb02159..a95bc176f756 100644 --- a/swarm/swarm.go +++ b/swarm/swarm.go @@ -22,6 +22,8 @@ import ( "crypto/ecdsa" "fmt" "net" + "os" + "path/filepath" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -53,15 +55,13 @@ type Swarm struct { storage storage.ChunkStore // internal access to storage, common interface to cloud storage backends dpa *storage.DPA // distributed preimage archive, the local API to the storage with document level storage/retrieval support //depo network.StorageHandler // remote request handler, interface between bzz protocol and the storage - cloud storage.CloudStore // procurement, cloud storage backend (can multi-cloud) - bzz *network.Bzz // the logistic manager - backend chequebook.Backend // simple blockchain Backend - privateKey *ecdsa.PrivateKey - corsString string - swapEnabled bool - lstore *storage.LocalStore // local store, needs to store for releasing resources after node stopped - sfs *fuse.SwarmFS // need this to cleanup all the active mounts on node exit - ps *pss.Pss + cloud storage.CloudStore // procurement, cloud storage backend (can multi-cloud) + bzz *network.Bzz // the logistic manager + backend chequebook.Backend // simple blockchain Backend + privateKey *ecdsa.PrivateKey + lstore *storage.LocalStore // local store, needs to store for releasing resources after node stopped + sfs *fuse.SwarmFS // need this to cleanup all the active mounts on node exit + ps *pss.Pss } type SwarmAPI struct { @@ -82,7 +82,8 @@ func (self *Swarm) API() *SwarmAPI { // implements node.Service // If mockStore is not nil, it will be used as the storage for chunk data. // MockStore should be used only for testing. 
-func NewSwarm(ctx *node.ServiceContext, backend chequebook.Backend, ensClient *ethclient.Client, config *api.Config, swapEnabled, syncEnabled bool, cors string, pssEnabled bool, mockStore *mock.NodeStore) (self *Swarm, err error) { +func NewSwarm(ctx *node.ServiceContext, backend chequebook.Backend, ensClient *ethclient.Client, config *api.Config, mockStore *mock.NodeStore) (self *Swarm, err error) { + if bytes.Equal(common.FromHex(config.PublicKey), storage.ZeroKey) { return nil, fmt.Errorf("empty public key") } @@ -91,11 +92,9 @@ func NewSwarm(ctx *node.ServiceContext, backend chequebook.Backend, ensClient *e } self = &Swarm{ - config: config, - swapEnabled: swapEnabled, - backend: backend, - privateKey: config.Swap.PrivateKey(), - corsString: cors, + config: config, + backend: backend, + privateKey: config.ShiftPrivateKey(), } log.Debug(fmt.Sprintf("Setting up Swarm service components")) @@ -137,7 +136,7 @@ func NewSwarm(ctx *node.ServiceContext, backend chequebook.Backend, ensClient *e log.Debug(fmt.Sprintf("-> Content Store API")) // Pss = postal service over swarm (devp2p over bzz) - if pssEnabled { + if self.config.PssEnabled { pssparams := pss.NewPssParams(self.privateKey) self.ps = pss.NewPss(to, self.dpa, pssparams) if pss.IsActiveHandshake { @@ -158,7 +157,23 @@ func NewSwarm(ctx *node.ServiceContext, backend chequebook.Backend, ensClient *e } log.Debug(fmt.Sprintf("-> Swarm Domain Name Registrar @ address %v", config.EnsRoot.Hex())) - self.api = api.NewApi(self.dpa, self.dns) + var resourceHandler *storage.ResourceHandler + // if use resource updates + if self.config.ResourceEnabled { + var resourceValidator storage.ResourceValidator + if self.dns != nil { + resourceValidator, err = storage.NewENSValidator(config.EnsRoot, ensClient, transactOpts, storage.NewGenericResourceSigner(self.privateKey)) + if err != nil { + return nil, err + } + } + resourceHandler, err = storage.NewResourceHandler(filepath.Join(self.config.Path, storage.DbDirName), self.cloud, ensClient, resourceValidator) + if err != nil { + return nil, err + } + } + + self.api = api.NewApi(self.dpa, self.dns, resourceHandler) // Manifests for Smart Hosting log.Debug(fmt.Sprintf("-> Web3 virtual server API")) @@ -186,7 +201,7 @@ func (self *Swarm) Start(srv *p2p.Server) error { log.Warn("Updated bzz local addr", "oaddr", fmt.Sprintf("%x", newaddr.OAddr), "uaddr", fmt.Sprintf("%x", newaddr.UAddr)) // set chequebook - if self.swapEnabled { + if self.config.SwapEnabled { ctx := context.Background() // The initial setup has no deadline. 
err := self.SetChequebook(ctx) if err != nil { @@ -219,14 +234,14 @@ func (self *Swarm) Start(srv *p2p.Server) error { addr := net.JoinHostPort(self.config.ListenAddr, self.config.Port) go httpapi.StartHttpServer(self.api, &httpapi.ServerConfig{ Addr: addr, - CorsString: self.corsString, + CorsString: self.config.Cors, }) } log.Debug(fmt.Sprintf("Swarm http proxy started on port: %v", self.config.Port)) - if self.corsString != "" { - log.Debug(fmt.Sprintf("Swarm http proxy started with corsdomain: %v", self.corsString)) + if self.config.Cors != "" { + log.Debug(fmt.Sprintf("Swarm http proxy started with corsdomain: %v", self.config.Cors)) } return nil @@ -360,7 +375,7 @@ func NewLocalSwarm(datadir, port string) (self *Swarm, err error) { } self = &Swarm{ - api: api.NewApi(dpa, nil), + api: api.NewApi(dpa, nil, nil), config: config, } diff --git a/swarm/testutil/http.go b/swarm/testutil/http.go index b1dd4d4e6521..77e3386fd217 100644 --- a/swarm/testutil/http.go +++ b/swarm/testutil/http.go @@ -17,16 +17,31 @@ package testutil import ( + "context" "io/ioutil" + "math/big" "net/http/httptest" "os" "testing" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/swarm/api" httpapi "github.com/ethereum/go-ethereum/swarm/api/http" "github.com/ethereum/go-ethereum/swarm/storage" ) +type fakeBackend struct { + blocknumber int64 +} + +func (f *fakeBackend) HeaderByNumber(context context.Context, bigblock *big.Int) (*types.Header, error) { + f.blocknumber++ + biggie := big.NewInt(f.blocknumber) + return &types.Header{ + Number: biggie, + }, nil +} + func NewTestSwarmServer(t *testing.T) *TestSwarmServer { dir, err := ioutil.TempDir("", "swarm-storage-test") if err != nil { @@ -38,7 +53,7 @@ func NewTestSwarmServer(t *testing.T) *TestSwarmServer { CacheCapacity: 5000, Radius: 0, } - localStore, err := storage.NewLocalStore(storage.MakeHashFunc("SHA3"), storeparams, nil) + localStore, err := storage.NewLocalStore(storage.MakeHashFunc(storage.SHA3Hash), storeparams, nil) if err != nil { os.RemoveAll(dir) t.Fatal(err) @@ -49,24 +64,55 @@ func NewTestSwarmServer(t *testing.T) *TestSwarmServer { ChunkStore: localStore, } dpa.Start() - a := api.NewApi(dpa, nil) + + // mutable resources test setup + resourceDir, err := ioutil.TempDir("", "swarm-resource-test") + if err != nil { + t.Fatal(err) + } + + rh, err := storage.NewResourceHandler(resourceDir, &testCloudStore{}, &fakeBackend{}, nil) + if err != nil { + t.Fatal(err) + } + + a := api.NewApi(dpa, nil, rh) srv := httptest.NewServer(httpapi.NewServer(a)) return &TestSwarmServer{ Server: srv, Dpa: dpa, dir: dir, + Hasher: storage.MakeHashFunc(storage.SHA3Hash)(), + cleanup: func() { + srv.Close() + rh.Close() + dpa.Stop() + os.RemoveAll(dir) + os.RemoveAll(resourceDir) + }, } } type TestSwarmServer struct { *httptest.Server - - Dpa *storage.DPA - dir string + Hasher storage.SwarmHash + Dpa *storage.DPA + dir string + cleanup func() } func (t *TestSwarmServer) Close() { - t.Server.Close() - t.Dpa.Stop() - os.RemoveAll(t.dir) + t.cleanup() +} + +type testCloudStore struct { +} + +func (c *testCloudStore) Store(*storage.Chunk) { +} + +func (c *testCloudStore) Deliver(*storage.Chunk) { +} + +func (c *testCloudStore) Retrieve(*storage.Chunk) { }
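
For reference, the bzz-resource endpoints added in swarm/api/http/server.go can be exercised with any HTTP client. The standalone Go sketch below is illustrative only and not part of the patch: the gateway address is an assumption, and the flow simply mirrors TestBzzResource (create with a frequency, post a further update, then read back via the three lookup forms).

// Illustrative client sketch (not part of the patch): drives the new bzz-resource
// HTTP endpoints the same way TestBzzResource does. The gateway address below is
// an assumption; any running swarm HTTP proxy with resource updates enabled works.
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

const gateway = "http://localhost:8500" // assumed local swarm HTTP proxy

func post(url string, body []byte) []byte {
	resp, err := http.Post(url, "application/octet-stream", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		log.Fatalf("POST %s: %s", url, resp.Status)
	}
	b, _ := ioutil.ReadAll(resp.Body)
	return b
}

func get(url string) []byte {
	resp, err := http.Get(url)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		log.Fatalf("GET %s: %s", url, resp.Status)
	}
	b, _ := ioutil.ReadAll(resp.Body)
	return b
}

func main() {
	// 32-byte resource "name": 0x2a followed by zeroes, hex-encoded (as in TestBzzResource)
	namebytes := make([]byte, 32)
	namebytes[0] = 0x2a
	name := fmt.Sprintf("%x", namebytes)

	// create the resource with an update frequency of 13 blocks and store update 1;
	// the response body is the hex of the resource's root key (the name hash)
	rootKey := post(fmt.Sprintf("%s/bzz-resource:/%s/13", gateway, name), []byte("update 1"))
	fmt.Printf("created resource, root key %s\n", rootKey)

	// store update 2 (no frequency in the path once the resource exists)
	post(fmt.Sprintf("%s/bzz-resource:/%s", gateway, name), []byte("update 2"))

	// latest update
	fmt.Printf("latest:      %s\n", get(fmt.Sprintf("%s/bzz-resource:/%s", gateway, name)))
	// latest update within period 1
	fmt.Printf("period 1:    %s\n", get(fmt.Sprintf("%s/bzz-resource:/%s/1", gateway, name)))
	// version 1 of period 1
	fmt.Printf("period 1 v1: %s\n", get(fmt.Sprintf("%s/bzz-resource:/%s/1/1", gateway, name)))
}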