diff --git a/.travis.yml b/.travis.yml index 6eab0b041..e0dfb3646 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,7 +7,7 @@ jobs: - stage: lint os: linux dist: xenial - go: 1.12.x + go: 1.13.x env: - lint git: diff --git a/README.md b/README.md index 3bfd24906..92a7125b4 100644 --- a/README.md +++ b/README.md @@ -303,7 +303,7 @@ ones either). To start a `geth` instance for mining, run it with all your usual by: ```shell -$ geth --mine --minerthreads=1 --etherbase=0x0000000000000000000000000000000000000000 +$ geth --mine --miner.threads=1 --etherbase=0x0000000000000000000000000000000000000000 ``` Which will start mining blocks and transactions on a single CPU thread, crediting all diff --git a/accounts/abi/abi.go b/accounts/abi/abi.go index e145cbdd7..efa985223 100644 --- a/accounts/abi/abi.go +++ b/accounts/abi/abi.go @@ -75,9 +75,6 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) { // Unpack output in v according to the abi specification func (abi ABI) Unpack(v interface{}, name string, data []byte) (err error) { - if len(data) == 0 { - return fmt.Errorf("abi: unmarshalling empty output") - } // since there can't be naming collisions with contracts and events, // we need to decide whether we're calling a method or an event if method, ok := abi.Methods[name]; ok { @@ -94,9 +91,6 @@ func (abi ABI) Unpack(v interface{}, name string, data []byte) (err error) { // UnpackIntoMap unpacks a log into the provided map[string]interface{} func (abi ABI) UnpackIntoMap(v map[string]interface{}, name string, data []byte) (err error) { - if len(data) == 0 { - return fmt.Errorf("abi: unmarshalling empty output") - } // since there can't be naming collisions with contracts and events, // we need to decide whether we're calling a method or an event if method, ok := abi.Methods[name]; ok { diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go index 6526d1fe9..3ab231ed8 100644 --- a/accounts/abi/abi_test.go +++ b/accounts/abi/abi_test.go @@ -57,7 +57,7 @@ const jsondata2 = ` ]` func TestReader(t *testing.T) { - Uint256, _ := NewType("uint256", nil) + Uint256, _ := NewType("uint256", "", nil) exp := ABI{ Methods: map[string]Method{ "balance": { @@ -172,7 +172,7 @@ func TestTestSlice(t *testing.T) { } func TestMethodSignature(t *testing.T) { - String, _ := NewType("string", nil) + String, _ := NewType("string", "", nil) m := Method{"foo", "foo", false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil} exp := "foo(string,string)" if m.Sig() != exp { @@ -184,7 +184,7 @@ func TestMethodSignature(t *testing.T) { t.Errorf("expected ids to match %x != %x", m.ID(), idexp) } - uintt, _ := NewType("uint256", nil) + uintt, _ := NewType("uint256", "", nil) m = Method{"foo", "foo", false, []Argument{{"bar", uintt, false}}, nil} exp = "foo(uint256)" if m.Sig() != exp { @@ -192,7 +192,7 @@ func TestMethodSignature(t *testing.T) { } // Method with tuple arguments - s, _ := NewType("tuple", []ArgumentMarshaling{ + s, _ := NewType("tuple", "", []ArgumentMarshaling{ {Name: "a", Type: "int256"}, {Name: "b", Type: "int256[]"}, {Name: "c", Type: "tuple[]", Components: []ArgumentMarshaling{ @@ -602,9 +602,9 @@ func TestBareEvents(t *testing.T) { { "type" : "event", "name" : "tuple", "inputs" : [{ "indexed":false, "name":"t", "type":"tuple", "components":[{"name":"a", "type":"uint256"}] }, { "indexed":true, "name":"arg1", "type":"address" }] } ]` - arg0, _ := NewType("uint256", nil) - arg1, _ := NewType("address", nil) - tuple, _ := NewType("tuple", []ArgumentMarshaling{{Name: "a", 
Type: "uint256"}}) + arg0, _ := NewType("uint256", "", nil) + arg1, _ := NewType("address", "", nil) + tuple, _ := NewType("tuple", "", []ArgumentMarshaling{{Name: "a", Type: "uint256"}}) expectedEvents := map[string]struct { Anonymous bool diff --git a/accounts/abi/argument.go b/accounts/abi/argument.go index 4dae58653..f8ec11b9f 100644 --- a/accounts/abi/argument.go +++ b/accounts/abi/argument.go @@ -34,10 +34,11 @@ type Argument struct { type Arguments []Argument type ArgumentMarshaling struct { - Name string - Type string - Components []ArgumentMarshaling - Indexed bool + Name string + Type string + InternalType string + Components []ArgumentMarshaling + Indexed bool } // UnmarshalJSON implements json.Unmarshaler interface @@ -48,7 +49,7 @@ func (argument *Argument) UnmarshalJSON(data []byte) error { return fmt.Errorf("argument json err: %v", err) } - argument.Type, err = NewType(arg.Type, arg.Components) + argument.Type, err = NewType(arg.Type, arg.InternalType, arg.Components) if err != nil { return err } @@ -88,6 +89,13 @@ func (arguments Arguments) isTuple() bool { // Unpack performs the operation hexdata -> Go format func (arguments Arguments) Unpack(v interface{}, data []byte) error { + if len(data) == 0 { + if len(arguments) != 0 { + return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected") + } else { + return nil // Nothing to unmarshal, return + } + } // make sure the passed value is arguments pointer if reflect.Ptr != reflect.ValueOf(v).Kind() { return fmt.Errorf("abi: Unpack(non-pointer %T)", v) @@ -104,11 +112,17 @@ func (arguments Arguments) Unpack(v interface{}, data []byte) error { // UnpackIntoMap performs the operation hexdata -> mapping of argument name to argument value func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte) error { + if len(data) == 0 { + if len(arguments) != 0 { + return fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected") + } else { + return nil // Nothing to unmarshal, return + } + } marshalledValues, err := arguments.UnpackValues(data) if err != nil { return err } - return arguments.unpackIntoMap(v, marshalledValues) } diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go index 710a17b2b..9d3bd8406 100644 --- a/accounts/abi/bind/base.go +++ b/accounts/abi/bind/base.go @@ -218,7 +218,7 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i } } // If the contract surely has code (or code is not needed), estimate the transaction - msg := ethereum.CallMsg{From: opts.From, To: contract, Value: value, Data: input} + msg := ethereum.CallMsg{From: opts.From, To: contract, GasPrice: gasPrice, Value: value, Data: input} gasLimit, err = c.transactor.EstimateGas(ensureContext(opts.Context), msg) if err != nil { return nil, fmt.Errorf("failed to estimate gas needed: %v", err) diff --git a/accounts/abi/bind/bind.go b/accounts/abi/bind/bind.go index bca11136f..2af0f43fe 100644 --- a/accounts/abi/bind/bind.go +++ b/accounts/abi/bind/bind.go @@ -86,7 +86,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] if input.Name == "" { normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j) } - if _, exist := structs[input.Type.String()]; input.Type.T == abi.TupleTy && !exist { + if hasStruct(input.Type) { bindStructType[lang](input.Type, structs) } } @@ -96,7 +96,7 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] if output.Name != "" { normalized.Outputs[j].Name = 
capitalise(output.Name) } - if _, exist := structs[output.Type.String()]; output.Type.T == abi.TupleTy && !exist { + if hasStruct(output.Type) { bindStructType[lang](output.Type, structs) } } @@ -119,14 +119,11 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string] normalized.Inputs = make([]abi.Argument, len(original.Inputs)) copy(normalized.Inputs, original.Inputs) for j, input := range normalized.Inputs { - // Indexed fields are input, non-indexed ones are outputs - if input.Indexed { - if input.Name == "" { - normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j) - } - if _, exist := structs[input.Type.String()]; input.Type.T == abi.TupleTy && !exist { - bindStructType[lang](input.Type, structs) - } + if input.Name == "" { + normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j) + } + if hasStruct(input.Type) { + bindStructType[lang](input.Type, structs) } } // Append the event to the accumulator list @@ -244,7 +241,7 @@ func bindBasicTypeGo(kind abi.Type) string { func bindTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { switch kind.T { case abi.TupleTy: - return structs[kind.String()].Name + return structs[kind.TupleRawName+kind.String()].Name case abi.ArrayTy: return fmt.Sprintf("[%d]", kind.Size) + bindTypeGo(*kind.Elem, structs) case abi.SliceTy: @@ -321,7 +318,7 @@ func pluralizeJavaType(typ string) string { func bindTypeJava(kind abi.Type, structs map[string]*tmplStruct) string { switch kind.T { case abi.TupleTy: - return structs[kind.String()].Name + return structs[kind.TupleRawName+kind.String()].Name case abi.ArrayTy, abi.SliceTy: return pluralizeJavaType(bindTypeJava(*kind.Elem, structs)) default: @@ -340,6 +337,13 @@ var bindTopicType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) // funcionality as for simple types, but dynamic types get converted to hashes. func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { bound := bindTypeGo(kind, structs) + + // todo(rjl493456442) according to the solidity documentation, indexed event + // parameters that are not value types, i.e. arrays and structs, are not + // stored directly but instead a keccak256-hash of an encoding is stored. + // + // We only convert strings and bytes to hashes; we still need to deal with + // arrays (both fixed-size and dynamic-size) and structs. if bound == "string" || bound == "[]byte" { bound = "common.Hash" } @@ -350,6 +354,13 @@ func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { // funcionality as for simple types, but dynamic types get converted to hashes. func bindTopicTypeJava(kind abi.Type, structs map[string]*tmplStruct) string { bound := bindTypeJava(kind, structs) + + // todo(rjl493456442) according to the solidity documentation, indexed event + // parameters that are not value types, i.e. arrays and structs, are not + // stored directly but instead a keccak256-hash of an encoding is stored. + // + // We only convert strings and bytes to hashes; we still need to deal with + // arrays (both fixed-size and dynamic-size) and structs. if bound == "String" || bound == "byte[]" { bound = "Hash" } @@ -369,7 +380,14 @@ var bindStructType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { switch kind.T { case abi.TupleTy: - if s, exist := structs[kind.String()]; exist { + // We compose the raw struct name and the canonical parameter expression + // together here.
Before solidity v0.5.11, kind.TupleRawName is empty, so + // we use the canonical parameter expression to distinguish between + // different struct definitions. For backward compatibility, the two are + // concatenated so that a non-empty kind.TupleRawName still yields a + // unique identifier. + id := kind.TupleRawName + kind.String() + if s, exist := structs[id]; exist { return s.Name } var fields []*tmplField @@ -377,8 +395,11 @@ func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { field := bindStructTypeGo(*elem, structs) fields = append(fields, &tmplField{Type: field, Name: capitalise(kind.TupleRawNames[i]), SolKind: *elem}) } - name := fmt.Sprintf("Struct%d", len(structs)) - structs[kind.String()] = &tmplStruct{ + name := kind.TupleRawName + if name == "" { + name = fmt.Sprintf("Struct%d", len(structs)) + } + structs[id] = &tmplStruct{ Name: name, Fields: fields, } @@ -398,7 +419,14 @@ func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string { func bindStructTypeJava(kind abi.Type, structs map[string]*tmplStruct) string { switch kind.T { case abi.TupleTy: - if s, exist := structs[kind.String()]; exist { + // We compose the raw struct name and the canonical parameter expression + // together here. Before solidity v0.5.11, kind.TupleRawName is empty, so + // we use the canonical parameter expression to distinguish between + // different struct definitions. For backward compatibility, the two are + // concatenated so that a non-empty kind.TupleRawName still yields a + // unique identifier. + id := kind.TupleRawName + kind.String() + if s, exist := structs[id]; exist { return s.Name } var fields []*tmplField @@ -406,8 +434,11 @@ func bindStructTypeJava(kind abi.Type, structs map[string]*tmplStruct) string { field := bindStructTypeJava(*elem, structs) fields = append(fields, &tmplField{Type: field, Name: decapitalise(kind.TupleRawNames[i]), SolKind: *elem}) } - name := fmt.Sprintf("Class%d", len(structs)) - structs[kind.String()] = &tmplStruct{ + name := kind.TupleRawName + if name == "" { + name = fmt.Sprintf("Class%d", len(structs)) + } + structs[id] = &tmplStruct{ Name: name, Fields: fields, } @@ -497,6 +528,21 @@ func structured(args abi.Arguments) bool { return true } +// hasStruct reports whether the given type is a struct, a struct slice +// or a struct array. +func hasStruct(t abi.Type) bool { + switch t.T { + case abi.SliceTy: + return hasStruct(*t.Elem) + case abi.ArrayTy: + return hasStruct(*t.Elem) + case abi.TupleTy: + return true + default: + return false + } +} + // resolveArgName converts a raw argument representation into a user friendly format. 
func resolveArgName(arg abi.Argument, structs map[string]*tmplStruct) string { var ( @@ -512,7 +558,7 @@ loop: case abi.ArrayTy: prefix += fmt.Sprintf("[%d]", typ.Size) default: - embedded = typ.String() + embedded = typ.TupleRawName + typ.String() break loop } typ = typ.Elem diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go index 28dd635e3..4ee86d09b 100644 --- a/accounts/abi/bind/bind_test.go +++ b/accounts/abi/bind/bind_test.go @@ -1085,7 +1085,10 @@ var bindTests = []struct { contract Tuple { struct S { uint a; uint[] b; T[] c; } struct T { uint x; uint y; } + struct P { uint8 x; uint8 y; } + struct Q { uint16 x; uint16 y; } event TupleEvent(S a, T[2][] b, T[][2] c, S[] d, uint[] e); + event TupleEvent2(P[]); function func1(S memory a, T[2][] memory b, T[][2] memory c, S[] memory d, uint[] memory e) public pure returns (S memory, T[2][] memory, T[][2] memory, S[] memory, uint[] memory) { return (a, b, c, d, e); @@ -1093,12 +1096,12 @@ var bindTests = []struct { function func2(S memory a, T[2][] memory b, T[][2] memory c, S[] memory d, uint[] memory e) public { emit TupleEvent(a, b, c, d, e); } + function func3(Q[] memory) public pure {} // call function, nothing to return } - `, - []string{`608060405234801561001057600080fd5b50610eb2806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c8063443c79b41461003b578063d0062cdd1461006f575b600080fd5b61005560048036036100509190810190610624565b61008b565b604051610066959493929190610b28565b60405180910390f35b61008960048036036100849190810190610624565b6100bc565b005b610093610102565b606061009d610123565b6060808989898989945094509450945094509550955095509550959050565b7f18d6e66efa53739ca6d13626f35ebc700b31cced3eddb50c70bbe9c082c6cd0085858585856040516100f3959493929190610b28565b60405180910390a15050505050565b60405180606001604052806000815260200160608152602001606081525090565b60405180604001604052806002905b60608152602001906001900390816101325790505090565b600082601f83011261015b57600080fd5b813561016e61016982610bcb565b610b9e565b9150818183526020840193506020810190508385608084028201111561019357600080fd5b60005b838110156101c357816101a988826102a6565b845260208401935060808301925050600181019050610196565b5050505092915050565b600082601f8301126101de57600080fd5b60026101f16101ec82610bf3565b610b9e565b9150818360005b83811015610228578135860161020e888261031a565b8452602084019350602083019250506001810190506101f8565b5050505092915050565b600082601f83011261024357600080fd5b813561025661025182610c15565b610b9e565b9150818183526020840193506020810190508360005b8381101561029c578135860161028288826104a3565b84526020840193506020830192505060018101905061026c565b5050505092915050565b600082601f8301126102b757600080fd5b60026102ca6102c582610c3d565b610b9e565b915081838560408402820111156102e057600080fd5b60005b8381101561031057816102f688826105c3565b8452602084019350604083019250506001810190506102e3565b5050505092915050565b600082601f83011261032b57600080fd5b813561033e61033982610c5f565b610b9e565b9150818183526020840193506020810190508385604084028201111561036357600080fd5b60005b83811015610393578161037988826105c3565b845260208401935060408301925050600181019050610366565b5050505092915050565b600082601f8301126103ae57600080fd5b81356103c16103bc82610c87565b610b9e565b915081818352602084019350602081019050838560208402820111156103e657600080fd5b60005b8381101561041657816103fc888261060f565b8452602084019350602083019250506001810190506103e9565b5050505092915050565b600082601f83011261043157600080fd5b813561044461043f82610caf565b610b9e565b9150818183526020840193506020810190508385602084028201111561046957
600080fd5b60005b83811015610499578161047f888261060f565b84526020840193506020830192505060018101905061046c565b5050505092915050565b6000606082840312156104b557600080fd5b6104bf6060610b9e565b905060006104cf8482850161060f565b600083015250602082013567ffffffffffffffff8111156104ef57600080fd5b6104fb8482850161039d565b602083015250604082013567ffffffffffffffff81111561051b57600080fd5b6105278482850161031a565b60408301525092915050565b60006060828403121561054557600080fd5b61054f6060610b9e565b9050600061055f8482850161060f565b600083015250602082013567ffffffffffffffff81111561057f57600080fd5b61058b8482850161039d565b602083015250604082013567ffffffffffffffff8111156105ab57600080fd5b6105b78482850161031a565b60408301525092915050565b6000604082840312156105d557600080fd5b6105df6040610b9e565b905060006105ef8482850161060f565b60008301525060206106038482850161060f565b60208301525092915050565b60008135905061061e81610e58565b92915050565b600080600080600060a0868803121561063c57600080fd5b600086013567ffffffffffffffff81111561065657600080fd5b61066288828901610533565b955050602086013567ffffffffffffffff81111561067f57600080fd5b61068b8882890161014a565b945050604086013567ffffffffffffffff8111156106a857600080fd5b6106b4888289016101cd565b935050606086013567ffffffffffffffff8111156106d157600080fd5b6106dd88828901610232565b925050608086013567ffffffffffffffff8111156106fa57600080fd5b61070688828901610420565b9150509295509295909350565b600061071f83836108cb565b60808301905092915050565b60006107378383610922565b905092915050565b600061074b8383610a93565b905092915050565b600061075f8383610aea565b60408301905092915050565b60006107778383610b19565b60208301905092915050565b600061078e82610d3b565b6107988185610de3565b93506107a383610cd7565b8060005b838110156107d45781516107bb8882610713565b97506107c683610d88565b9250506001810190506107a7565b5085935050505092915050565b60006107ec82610d46565b6107f68185610df4565b93508360208202850161080885610ce7565b8060005b858110156108445784840389528151610825858261072b565b945061083083610d95565b925060208a0199505060018101905061080c565b50829750879550505050505092915050565b600061086182610d51565b61086b8185610dff565b93508360208202850161087d85610cf1565b8060005b858110156108b9578484038952815161089a858261073f565b94506108a583610da2565b925060208a01995050600181019050610881565b50829750879550505050505092915050565b6108d481610d5c565b6108de8184610e10565b92506108e982610d01565b8060005b8381101561091a5781516109018782610753565b965061090c83610daf565b9250506001810190506108ed565b505050505050565b600061092d82610d67565b6109378185610e1b565b935061094283610d0b565b8060005b8381101561097357815161095a8882610753565b975061096583610dbc565b925050600181019050610946565b5085935050505092915050565b600061098b82610d7d565b6109958185610e3d565b93506109a083610d2b565b8060005b838110156109d15781516109b8888261076b565b97506109c383610dd6565b9250506001810190506109a4565b5085935050505092915050565b60006109e982610d72565b6109f38185610e2c565b93506109fe83610d1b565b8060005b83811015610a2f578151610a16888261076b565b9750610a2183610dc9565b925050600181019050610a02565b5085935050505092915050565b6000606083016000830151610a546000860182610b19565b5060208301518482036020860152610a6c82826109de565b91505060408301518482036040860152610a868282610922565b9150508091505092915050565b6000606083016000830151610aab6000860182610b19565b5060208301518482036020860152610ac382826109de565b91505060408301518482036040860152610add8282610922565b9150508091505092915050565b604082016000820151610b006000850182610b19565b506020820151610b136020850182610b19565b50505050565b610b2281610e4e565b82525050565b600060a0820190508181036000830152610b428188610a3c565b90508181036020830152610b568187610783565
b90508181036040830152610b6a81866107e1565b90508181036060830152610b7e8185610856565b90508181036080830152610b928184610980565b90509695505050505050565b6000604051905081810181811067ffffffffffffffff82111715610bc157600080fd5b8060405250919050565b600067ffffffffffffffff821115610be257600080fd5b602082029050602081019050919050565b600067ffffffffffffffff821115610c0a57600080fd5b602082029050919050565b600067ffffffffffffffff821115610c2c57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff821115610c5457600080fd5b602082029050919050565b600067ffffffffffffffff821115610c7657600080fd5b602082029050602081019050919050565b600067ffffffffffffffff821115610c9e57600080fd5b602082029050602081019050919050565b600067ffffffffffffffff821115610cc657600080fd5b602082029050602081019050919050565b6000819050602082019050919050565b6000819050919050565b6000819050602082019050919050565b6000819050919050565b6000819050602082019050919050565b6000819050602082019050919050565b6000819050602082019050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600081519050919050565b600081519050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b600082825260208201905092915050565b600081905092915050565b600082825260208201905092915050565b600081905092915050565b600082825260208201905092915050565b600082825260208201905092915050565b600082825260208201905092915050565b6000819050919050565b610e6181610e4e565b8114610e6c57600080fd5b5056fea365627a7a72305820405a6336d8c302cee779de6788527018e5a2393892328fbf12b96065df2de00a6c6578706572696d656e74616cf564736f6c634300050a0040`}, + []string{`60806040523480156100115760006000fd5b50610017565b6110b2806100266000396000f3fe60806040523480156100115760006000fd5b50600436106100465760003560e01c8063443c79b41461004c578063d0062cdd14610080578063e4d9a43b1461009c57610046565b60006000fd5b610066600480360361006191908101906107b8565b6100b8565b604051610077959493929190610ccb565b60405180910390f35b61009a600480360361009591908101906107b8565b6100ef565b005b6100b660048036036100b19190810190610775565b610136565b005b6100c061013a565b60606100ca61015e565b606060608989898989945094509450945094506100e2565b9550955095509550959050565b7f18d6e66efa53739ca6d13626f35ebc700b31cced3eddb50c70bbe9c082c6cd008585858585604051610126959493929190610ccb565b60405180910390a15b5050505050565b5b50565b60405180606001604052806000815260200160608152602001606081526020015090565b60405180604001604052806002905b606081526020019060019003908161016d57905050905661106e565b600082601f830112151561019d5760006000fd5b81356101b06101ab82610d6f565b610d41565b915081818352602084019350602081019050838560808402820111156101d65760006000fd5b60005b8381101561020757816101ec888261037a565b8452602084019350608083019250505b6001810190506101d9565b5050505092915050565b600082601f83011215156102255760006000fd5b600261023861023382610d98565b610d41565b9150818360005b83811015610270578135860161025588826103f3565b8452602084019350602083019250505b60018101905061023f565b5050505092915050565b600082601f830112151561028e5760006000fd5b81356102a161029c82610dbb565b610d41565b915081818352602084019350602081019050838560408402820111156102c75760006000fd5b60005b838110156102f857816102dd888261058b565b8452602084019350604083019250505b6001810190506102ca565b5050505092915050565b600082601f83011215156103165760006000fd5b813561032961032482610de4565b610d41565b9150818183526020840193506020810190508360005b83811015610370578135860161035588826105d8565b8452602084019350602083019250505b6001
8101905061033f565b5050505092915050565b600082601f830112151561038e5760006000fd5b60026103a161039c82610e0d565b610d41565b915081838560408402820111156103b85760006000fd5b60005b838110156103e957816103ce88826106fe565b8452602084019350604083019250505b6001810190506103bb565b5050505092915050565b600082601f83011215156104075760006000fd5b813561041a61041582610e30565b610d41565b915081818352602084019350602081019050838560408402820111156104405760006000fd5b60005b83811015610471578161045688826106fe565b8452602084019350604083019250505b600181019050610443565b5050505092915050565b600082601f830112151561048f5760006000fd5b81356104a261049d82610e59565b610d41565b915081818352602084019350602081019050838560208402820111156104c85760006000fd5b60005b838110156104f957816104de8882610760565b8452602084019350602083019250505b6001810190506104cb565b5050505092915050565b600082601f83011215156105175760006000fd5b813561052a61052582610e82565b610d41565b915081818352602084019350602081019050838560208402820111156105505760006000fd5b60005b8381101561058157816105668882610760565b8452602084019350602083019250505b600181019050610553565b5050505092915050565b60006040828403121561059e5760006000fd5b6105a86040610d41565b905060006105b88482850161074b565b60008301525060206105cc8482850161074b565b60208301525092915050565b6000606082840312156105eb5760006000fd5b6105f56060610d41565b9050600061060584828501610760565b600083015250602082013567ffffffffffffffff8111156106265760006000fd5b6106328482850161047b565b602083015250604082013567ffffffffffffffff8111156106535760006000fd5b61065f848285016103f3565b60408301525092915050565b60006060828403121561067e5760006000fd5b6106886060610d41565b9050600061069884828501610760565b600083015250602082013567ffffffffffffffff8111156106b95760006000fd5b6106c58482850161047b565b602083015250604082013567ffffffffffffffff8111156106e65760006000fd5b6106f2848285016103f3565b60408301525092915050565b6000604082840312156107115760006000fd5b61071b6040610d41565b9050600061072b84828501610760565b600083015250602061073f84828501610760565b60208301525092915050565b60008135905061075a8161103a565b92915050565b60008135905061076f81611054565b92915050565b6000602082840312156107885760006000fd5b600082013567ffffffffffffffff8111156107a35760006000fd5b6107af8482850161027a565b91505092915050565b6000600060006000600060a086880312156107d35760006000fd5b600086013567ffffffffffffffff8111156107ee5760006000fd5b6107fa8882890161066b565b955050602086013567ffffffffffffffff8111156108185760006000fd5b61082488828901610189565b945050604086013567ffffffffffffffff8111156108425760006000fd5b61084e88828901610211565b935050606086013567ffffffffffffffff81111561086c5760006000fd5b61087888828901610302565b925050608086013567ffffffffffffffff8111156108965760006000fd5b6108a288828901610503565b9150509295509295909350565b60006108bb8383610a6a565b60808301905092915050565b60006108d38383610ac2565b905092915050565b60006108e78383610c36565b905092915050565b60006108fb8383610c8d565b60408301905092915050565b60006109138383610cbc565b60208301905092915050565b600061092a82610f0f565b6109348185610fb7565b935061093f83610eab565b8060005b8381101561097157815161095788826108af565b975061096283610f5c565b9250505b600181019050610943565b5085935050505092915050565b600061098982610f1a565b6109938185610fc8565b9350836020820285016109a585610ebb565b8060005b858110156109e257848403895281516109c285826108c7565b94506109cd83610f69565b925060208a019950505b6001810190506109a9565b50829750879550505050505092915050565b60006109ff82610f25565b610a098185610fd3565b935083602082028501610a1b85610ec5565b8060005b85811015610a585784840389528151610a3885826108db565b9450610a4383610f76565b925060208a019950505b600181019050610a1f565b5
0829750879550505050505092915050565b610a7381610f30565b610a7d8184610fe4565b9250610a8882610ed5565b8060005b83811015610aba578151610aa087826108ef565b9650610aab83610f83565b9250505b600181019050610a8c565b505050505050565b6000610acd82610f3b565b610ad78185610fef565b9350610ae283610edf565b8060005b83811015610b14578151610afa88826108ef565b9750610b0583610f90565b9250505b600181019050610ae6565b5085935050505092915050565b6000610b2c82610f51565b610b368185611011565b9350610b4183610eff565b8060005b83811015610b73578151610b598882610907565b9750610b6483610faa565b9250505b600181019050610b45565b5085935050505092915050565b6000610b8b82610f46565b610b958185611000565b9350610ba083610eef565b8060005b83811015610bd2578151610bb88882610907565b9750610bc383610f9d565b9250505b600181019050610ba4565b5085935050505092915050565b6000606083016000830151610bf76000860182610cbc565b5060208301518482036020860152610c0f8282610b80565b91505060408301518482036040860152610c298282610ac2565b9150508091505092915050565b6000606083016000830151610c4e6000860182610cbc565b5060208301518482036020860152610c668282610b80565b91505060408301518482036040860152610c808282610ac2565b9150508091505092915050565b604082016000820151610ca36000850182610cbc565b506020820151610cb66020850182610cbc565b50505050565b610cc581611030565b82525050565b600060a0820190508181036000830152610ce58188610bdf565b90508181036020830152610cf9818761091f565b90508181036040830152610d0d818661097e565b90508181036060830152610d2181856109f4565b90508181036080830152610d358184610b21565b90509695505050505050565b6000604051905081810181811067ffffffffffffffff82111715610d655760006000fd5b8060405250919050565b600067ffffffffffffffff821115610d875760006000fd5b602082029050602081019050919050565b600067ffffffffffffffff821115610db05760006000fd5b602082029050919050565b600067ffffffffffffffff821115610dd35760006000fd5b602082029050602081019050919050565b600067ffffffffffffffff821115610dfc5760006000fd5b602082029050602081019050919050565b600067ffffffffffffffff821115610e255760006000fd5b602082029050919050565b600067ffffffffffffffff821115610e485760006000fd5b602082029050602081019050919050565b600067ffffffffffffffff821115610e715760006000fd5b602082029050602081019050919050565b600067ffffffffffffffff821115610e9a5760006000fd5b602082029050602081019050919050565b6000819050602082019050919050565b6000819050919050565b6000819050602082019050919050565b6000819050919050565b6000819050602082019050919050565b6000819050602082019050919050565b6000819050602082019050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600081519050919050565b600081519050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b600082825260208201905092915050565b600081905092915050565b600082825260208201905092915050565b600081905092915050565b600082825260208201905092915050565b600082825260208201905092915050565b600082825260208201905092915050565b600061ffff82169050919050565b6000819050919050565b61104381611022565b811415156110515760006000fd5b50565b61105d81611030565b8114151561106b5760006000fd5b50565bfea365627a7a72315820d78c6ba7ee332581e6c4d9daa5fc07941841230f7ce49edf6e05b1b63853e8746c6578706572696d656e74616cf564736f6c634300050c0040`}, []string{` - 
[{"constant":true,"inputs":[{"components":[{"name":"a","type":"uint256"},{"name":"b","type":"uint256[]"},{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"c","type":"tuple[]"}],"name":"a","type":"tuple"},{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"b","type":"tuple[2][]"},{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"c","type":"tuple[][2]"},{"components":[{"name":"a","type":"uint256"},{"name":"b","type":"uint256[]"},{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"c","type":"tuple[]"}],"name":"d","type":"tuple[]"},{"name":"e","type":"uint256[]"}],"name":"func1","outputs":[{"components":[{"name":"a","type":"uint256"},{"name":"b","type":"uint256[]"},{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"c","type":"tuple[]"}],"name":"","type":"tuple"},{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"","type":"tuple[2][]"},{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"","type":"tuple[][2]"},{"components":[{"name":"a","type":"uint256"},{"name":"b","type":"uint256[]"},{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"c","type":"tuple[]"}],"name":"","type":"tuple[]"},{"name":"","type":"uint256[]"}],"payable":false,"stateMutability":"pure","type":"function"},{"constant":false,"inputs":[{"components":[{"name":"a","type":"uint256"},{"name":"b","type":"uint256[]"},{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"c","type":"tuple[]"}],"name":"a","type":"tuple"},{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"b","type":"tuple[2][]"},{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"c","type":"tuple[][2]"},{"components":[{"name":"a","type":"uint256"},{"name":"b","type":"uint256[]"},{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"c","type":"tuple[]"}],"name":"d","type":"tuple[]"},{"name":"e","type":"uint256[]"}],"name":"func2","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"anonymous":false,"inputs":[{"components":[{"name":"a","type":"uint256"},{"name":"b","type":"uint256[]"},{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"c","type":"tuple[]"}],"indexed":false,"name":"a","type":"tuple"},{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"indexed":false,"name":"b","type":"tuple[2][]"},{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"indexed":false,"name":"c","type":"tuple[][2]"},{"components":[{"name":"a","type":"uint256"},{"name":"b","type":"uint256[]"},{"components":[{"name":"x","type":"uint256"},{"name":"y","type":"uint256"}],"name":"c","type":"tuple[]"}],"indexed":false,"name":"d","type":"tuple[]"},{"indexed":false,"name":"e","type":"uint256[]"}],"name":"TupleEvent","type":"event"}] +[{"anonymous":false,"inputs":[{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256[]","name":"b","type":"uint256[]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[]","name":"c","type":"tuple[]"}],"indexed":false,"internalType":"struct 
Tuple.S","name":"a","type":"tuple"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"indexed":false,"internalType":"struct Tuple.T[2][]","name":"b","type":"tuple[2][]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"indexed":false,"internalType":"struct Tuple.T[][2]","name":"c","type":"tuple[][2]"},{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256[]","name":"b","type":"uint256[]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[]","name":"c","type":"tuple[]"}],"indexed":false,"internalType":"struct Tuple.S[]","name":"d","type":"tuple[]"},{"indexed":false,"internalType":"uint256[]","name":"e","type":"uint256[]"}],"name":"TupleEvent","type":"event"},{"anonymous":false,"inputs":[{"components":[{"internalType":"uint8","name":"x","type":"uint8"},{"internalType":"uint8","name":"y","type":"uint8"}],"indexed":false,"internalType":"struct Tuple.P[]","name":"","type":"tuple[]"}],"name":"TupleEvent2","type":"event"},{"constant":true,"inputs":[{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256[]","name":"b","type":"uint256[]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[]","name":"c","type":"tuple[]"}],"internalType":"struct Tuple.S","name":"a","type":"tuple"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[2][]","name":"b","type":"tuple[2][]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[][2]","name":"c","type":"tuple[][2]"},{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256[]","name":"b","type":"uint256[]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[]","name":"c","type":"tuple[]"}],"internalType":"struct Tuple.S[]","name":"d","type":"tuple[]"},{"internalType":"uint256[]","name":"e","type":"uint256[]"}],"name":"func1","outputs":[{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256[]","name":"b","type":"uint256[]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[]","name":"c","type":"tuple[]"}],"internalType":"struct Tuple.S","name":"","type":"tuple"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[2][]","name":"","type":"tuple[2][]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[][2]","name":"","type":"tuple[][2]"},{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256[]","name":"b","type":"uint256[]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct 
Tuple.T[]","name":"c","type":"tuple[]"}],"internalType":"struct Tuple.S[]","name":"","type":"tuple[]"},{"internalType":"uint256[]","name":"","type":"uint256[]"}],"payable":false,"stateMutability":"pure","type":"function"},{"constant":false,"inputs":[{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256[]","name":"b","type":"uint256[]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[]","name":"c","type":"tuple[]"}],"internalType":"struct Tuple.S","name":"a","type":"tuple"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[2][]","name":"b","type":"tuple[2][]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[][2]","name":"c","type":"tuple[][2]"},{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256[]","name":"b","type":"uint256[]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[]","name":"c","type":"tuple[]"}],"internalType":"struct Tuple.S[]","name":"d","type":"tuple[]"},{"internalType":"uint256[]","name":"e","type":"uint256[]"}],"name":"func2","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"components":[{"internalType":"uint16","name":"x","type":"uint16"},{"internalType":"uint16","name":"y","type":"uint16"}],"internalType":"struct Tuple.Q[]","name":"","type":"tuple[]"}],"name":"func3","outputs":[],"payable":false,"stateMutability":"pure","type":"function"}] `}, ` "math/big" @@ -1129,10 +1132,10 @@ var bindTests = []struct { } } - a := Struct1{ + a := TupleS{ A: big.NewInt(1), B: []*big.Int{big.NewInt(2), big.NewInt(3)}, - C: []Struct0{ + C: []TupleT{ { X: big.NewInt(4), Y: big.NewInt(5), @@ -1144,7 +1147,7 @@ var bindTests = []struct { }, } - b := [][2]Struct0{ + b := [][2]TupleT{ { { X: big.NewInt(8), @@ -1157,7 +1160,7 @@ var bindTests = []struct { }, } - c := [2][]Struct0{ + c := [2][]TupleT{ { { X: big.NewInt(12), @@ -1176,7 +1179,7 @@ var bindTests = []struct { }, } - d := []Struct1{a} + d := []TupleS{a} e := []*big.Int{big.NewInt(18), big.NewInt(19)} ret1, ret2, ret3, ret4, ret5, err := contract.Func1(nil, a, b, c, d, e) @@ -1207,6 +1210,11 @@ var bindTests = []struct { check(iter.Event.C, c, "field3 mismatch") check(iter.Event.D, d, "field4 mismatch") check(iter.Event.E, e, "field5 mismatch") + + err = contract.Func3(nil, nil) + if err != nil { + t.Fatalf("failed to call function which has no return, err %v", err) + } `, nil, nil, diff --git a/accounts/abi/bind/template.go b/accounts/abi/bind/template.go index 9293becf8..ac54780d6 100644 --- a/accounts/abi/bind/template.go +++ b/accounts/abi/bind/template.go @@ -65,7 +65,7 @@ type tmplField struct { // tmplStruct is a wrapper around an abi.tuple contains a auto-generated // struct name. type tmplStruct struct { - Name string // Auto-generated struct name(We can't obtain the raw struct name through abi) + Name string // Auto-generated struct name(before solidity v0.5.11) or raw name. Fields []*tmplField // Struct fields definition depends on the binding language. 
} @@ -483,7 +483,7 @@ var ( // Parse{{.Normalized.Name}} is a log parse operation binding the contract event 0x{{printf "%x" .Original.ID}}. // - // Solidity: {{.Original.String}} + // Solidity: {{formatevent .Original $structs}} func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Parse{{.Normalized.Name}}(log types.Log) (*{{$contract.Type}}{{.Normalized.Name}}, error) { event := new({{$contract.Type}}{{.Normalized.Name}}) if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil { diff --git a/accounts/abi/bind/topics.go b/accounts/abi/bind/topics.go index c836ce126..99d52287f 100644 --- a/accounts/abi/bind/topics.go +++ b/accounts/abi/bind/topics.go @@ -80,15 +80,19 @@ func makeTopics(query ...[]interface{}) ([][]common.Hash, error) { copy(topic[:], hash[:]) default: + // todo(rjl493456442) according to the solidity documentation, indexed event + // parameters that are not value types, i.e. arrays and structs, are not + // stored directly but instead a keccak256-hash of an encoding is stored. + // + // We only convert strings and bytes to hashes; we still need to deal with + // arrays (both fixed-size and dynamic-size) and structs. + // Attempt to generate the topic from funky types val := reflect.ValueOf(rule) - switch { - // static byte array case val.Kind() == reflect.Array && reflect.TypeOf(rule).Elem().Kind() == reflect.Uint8: reflect.Copy(reflect.ValueOf(topic[:val.Len()]), val) - default: return nil, fmt.Errorf("unsupported indexed type: %T", rule) } @@ -162,6 +166,7 @@ func parseTopics(out interface{}, fields abi.Arguments, topics []common.Hash) er default: // Ran out of plain primitive types, try custom types + switch field.Type() { case reflectHash: // Also covers all dynamic types field.Set(reflect.ValueOf(topics[0])) @@ -178,11 +183,9 @@ func parseTopics(out interface{}, fields abi.Arguments, topics []common.Hash) er default: // Ran out of custom types, try the crazies switch { - // static byte array case arg.Type.T == abi.FixedBytesTy: reflect.Copy(field, reflect.ValueOf(topics[0][:arg.Type.Size])) - default: return fmt.Errorf("unsupported indexed type: %v", arg.Type) } diff --git a/accounts/abi/bind/topics_test.go b/accounts/abi/bind/topics_test.go index c64f61fb6..421571a1a 100644 --- a/accounts/abi/bind/topics_test.go +++ b/accounts/abi/bind/topics_test.go @@ -59,7 +59,7 @@ func TestParseTopics(t *testing.T) { type bytesStruct struct { StaticBytes [5]byte } - bytesType, _ := abi.NewType("bytes5", nil) + bytesType, _ := abi.NewType("bytes5", "", nil) type args struct { createObj func() interface{} resultObj func() interface{} diff --git a/accounts/abi/pack_test.go b/accounts/abi/pack_test.go index 0dd2744bd..653b9e01a 100644 --- a/accounts/abi/pack_test.go +++ b/accounts/abi/pack_test.go @@ -613,7 +613,7 @@ func TestPack(t *testing.T) { "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].A[1] }, } { - typ, err := NewType(test.typ, test.components) + typ, err := NewType(test.typ, "", test.components) if err != nil { t.Fatalf("%v failed. Unexpected parse error: %v", i, err) } diff --git a/accounts/abi/type.go b/accounts/abi/type.go index 597d31439..4792283ee 100644 --- a/accounts/abi/type.go +++ b/accounts/abi/type.go @@ -53,6 +53,7 @@ type Type struct { stringKind string // holds the unparsed string for deriving signatures // Tuple relative fields + TupleRawName string // Raw struct name defined in source code, may be empty.
TupleElems []*Type // Type information of all tuple fields TupleRawNames []string // Raw field name of all tuple fields } @@ -63,7 +64,7 @@ var ( ) // NewType creates a new reflection type of abi type given in t. -func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) { +func NewType(t string, internalType string, components []ArgumentMarshaling) (typ Type, err error) { // check that array brackets are equal if they exist if strings.Count(t, "[") != strings.Count(t, "]") { return Type{}, fmt.Errorf("invalid arg type in abi") @@ -73,9 +74,14 @@ func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) { // if there are brackets, get ready to go into slice/array mode and // recursively create the type if strings.Count(t, "[") != 0 { - i := strings.LastIndex(t, "[") + // Note internalType can be empty here. + subInternal := internalType + if i := strings.LastIndex(internalType, "["); i != -1 { + subInternal = subInternal[:i] + } // recursively embed the type - embeddedType, err := NewType(t[:i], components) + i := strings.LastIndex(t, "[") + embeddedType, err := NewType(t[:i], subInternal, components) if err != nil { return Type{}, err } @@ -173,7 +179,7 @@ func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) { ) expression += "(" for idx, c := range components { - cType, err := NewType(c.Type, c.Components) + cType, err := NewType(c.Type, c.InternalType, c.Components) if err != nil { return Type{}, err } @@ -199,6 +205,17 @@ func NewType(t string, components []ArgumentMarshaling) (typ Type, err error) { typ.TupleRawNames = names typ.T = TupleTy typ.stringKind = expression + + const structPrefix = "struct " + // After solidity v0.5.10 the ABI contains a new field "internalType". + // From it we can obtain the struct name that the user defined in the + // source code. 
+ if internalType != "" && strings.HasPrefix(internalType, structPrefix) { + // Foo.Bar type definition is not allowed in golang, + // convert the format to FooBar + typ.TupleRawName = strings.Replace(internalType[len(structPrefix):], ".", "", -1) + } + case "function": typ.Kind = reflect.Array typ.T = FunctionTy diff --git a/accounts/abi/type_test.go b/accounts/abi/type_test.go index d6d7a3c68..1b31169f9 100644 --- a/accounts/abi/type_test.go +++ b/accounts/abi/type_test.go @@ -106,7 +106,7 @@ func TestTypeRegexp(t *testing.T) { } for _, tt := range tests { - typ, err := NewType(tt.blob, tt.components) + typ, err := NewType(tt.blob, "", tt.components) if err != nil { t.Errorf("type %q: failed to parse type string: %v", tt.blob, err) } @@ -281,7 +281,7 @@ func TestTypeCheck(t *testing.T) { B *big.Int }{{big.NewInt(0), big.NewInt(0)}, {big.NewInt(0), big.NewInt(0)}}, ""}, } { - typ, err := NewType(test.typ, test.components) + typ, err := NewType(test.typ, "", test.components) if err != nil && len(test.err) == 0 { t.Fatal("unexpected parse error:", err) } else if err != nil && len(test.err) != 0 { diff --git a/accounts/abi/unpack_test.go b/accounts/abi/unpack_test.go index 04a5faff5..5fabef637 100644 --- a/accounts/abi/unpack_test.go +++ b/accounts/abi/unpack_test.go @@ -51,6 +51,7 @@ func (test unpackTest) checkError(err error) error { } var unpackTests = []unpackTest{ + // Bools { def: `[{ "type": "bool" }]`, enc: "0000000000000000000000000000000000000000000000000000000000000001", @@ -73,6 +74,7 @@ var unpackTests = []unpackTest{ want: false, err: "abi: improperly encoded boolean value", }, + // Integers { def: `[{"type": "uint32"}]`, enc: "0000000000000000000000000000000000000000000000000000000000000001", @@ -122,11 +124,13 @@ var unpackTests = []unpackTest{ enc: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", want: big.NewInt(-1), }, + // Address { def: `[{"type": "address"}]`, enc: "0000000000000000000000000100000000000000000000000000000000000000", want: common.Address{1}, }, + // Bytes { def: `[{"type": "bytes32"}]`, enc: "0100000000000000000000000000000000000000000000000000000000000000", @@ -154,23 +158,39 @@ var unpackTests = []unpackTest{ enc: "0100000000000000000000000000000000000000000000000000000000000000", want: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, }, + // Functions { def: `[{"type": "function"}]`, enc: "0100000000000000000000000000000000000000000000000000000000000000", want: [24]byte{1}, }, - // slices + // Slice and Array { def: `[{"type": "uint8[]"}]`, enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", want: []uint8{1, 2}, }, + { + def: `[{"type": "uint8[]"}]`, + enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000", + want: []uint8{}, + }, + { + def: `[{"type": "uint256[]"}]`, + enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000", + want: []*big.Int{}, + }, { def: `[{"type": "uint8[2]"}]`, enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", want: [2]uint8{1, 2}, }, // multi dimensional, if these pass, all types that don't require 
length prefix should pass + { + def: `[{"type": "uint8[][]"}]`, + enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000", + want: [][]uint8{}, + }, { def: `[{"type": "uint8[][]"}]`, enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", @@ -186,11 +206,21 @@ var unpackTests = []unpackTest{ enc: "0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", want: [2][2]uint8{{1, 2}, {1, 2}}, }, + { + def: `[{"type": "uint8[][2]"}]`, + enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + want: [2][]uint8{{}, {}}, + }, { def: `[{"type": "uint8[][2]"}]`, enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001", want: [2][]uint8{{1}, {1}}, }, + { + def: `[{"type": "uint8[2][]"}]`, + enc: "00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000", + want: [][2]uint8{}, + }, { def: `[{"type": "uint8[2][]"}]`, enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002", @@ -420,7 +450,7 @@ func TestUnpack(t *testing.T) { } encb, err := hex.DecodeString(test.enc) if err != nil { - t.Fatalf("invalid hex: %s" + test.enc) + t.Fatalf("invalid hex %s: %v", test.enc, err) } outptr := reflect.New(reflect.TypeOf(test.want)) err = abi.Unpack(outptr.Interface(), "method", encb) diff --git a/appveyor.yml b/appveyor.yml index a8aec76b6..9d5bcb51c 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -23,8 +23,8 @@ environment: install: - git submodule update --init - rmdir C:\go /s /q - - appveyor DownloadFile https://dl.google.com/go/go1.13.windows-%GETH_ARCH%.zip - - 7z x go1.13.windows-%GETH_ARCH%.zip -y -oC:\ > NUL + - appveyor DownloadFile https://dl.google.com/go/go1.13.4.windows-%GETH_ARCH%.zip + - 7z x go1.13.4.windows-%GETH_ARCH%.zip -y -oC:\ > NUL - go version - gcc --version diff --git a/build/ci-notes.md 
b/build/ci-notes.md index 13e1fd230..edd9adc1c 100644 --- a/build/ci-notes.md +++ b/build/ci-notes.md @@ -22,19 +22,18 @@ variables `PPA_SIGNING_KEY` and `PPA_SSH_KEY` on Travis. We want to build go-ethereum with the most recent version of Go, irrespective of the Go version that is available in the main Ubuntu repository. In order to make this possible, -our PPA depends on the ~gophers/ubuntu/archive PPA. Our source package build-depends on -golang-1.11, which is co-installable alongside the regular golang package. PPA dependencies -can be edited at https://launchpad.net/%7Eethereum/+archive/ubuntu/ethereum/+edit-dependencies +we bundle the entire Go sources into our own source archive and start the build job by +compiling Go and then using that to build go-ethereum. On Trusty we have a special case +requiring the `~gophers/ubuntu/archive` PPA since Trusty can't even build Go itself. PPA +deps are set at https://launchpad.net/%7Eethereum/+archive/ubuntu/ethereum/+edit-dependencies ## Building Packages Locally (for testing) You need to run Ubuntu to do test packaging. -Add the gophers PPA and install Go 1.11 and Debian packaging tools: +Install any version of Go and Debian packaging tools: - $ sudo apt-add-repository ppa:gophers/ubuntu/archive - $ sudo apt-get update - $ sudo apt-get install build-essential golang-1.11 devscripts debhelper python-bzrlib python-paramiko + $ sudo apt-get install build-essential golang-go devscripts debhelper python-bzrlib python-paramiko Create the source packages: @@ -42,10 +41,10 @@ Create the source packages: Then go into the source package directory for your running distribution and build the package: - $ cd dist/ethereum-unstable-1.6.0+xenial + $ cd dist/ethereum-unstable-1.9.6+bionic $ dpkg-buildpackage Built packages are placed in the dist/ directory. $ cd .. - $ dpkg-deb -c geth-unstable_1.6.0+xenial_amd64.deb + $ dpkg-deb -c geth-unstable_1.9.6+bionic_amd64.deb diff --git a/build/ci.go b/build/ci.go index ba25f670a..74cc82112 100644 --- a/build/ci.go +++ b/build/ci.go @@ -58,6 +58,7 @@ import ( "strings" "time" + "github.com/ethersocial/go-ethersocial/common/hexutil" "github.com/ethersocial/go-ethersocial/internal/build" "github.com/ethersocial/go-ethersocial/params" ) @@ -148,7 +149,18 @@ var ( // Note: zesty is unsupported because it was officially deprecated on Launchpad. // Note: artful is unsupported because it was officially deprecated on Launchpad. // Note: cosmic is unsupported because it was officially deprecated on Launchpad. 
- debDistros = []string{"trusty", "xenial", "bionic", "disco", "eoan"} + debDistroGoBoots = map[string]string{ + "trusty": "golang-1.11", + "xenial": "golang-go", + "bionic": "golang-go", + "disco": "golang-go", + "eoan": "golang-go", + } + + debGoBootPaths = map[string]string{ + "golang-1.11": "/usr/lib/go-1.11", + "golang-go": "/usr/lib/go", + } ) var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin")) @@ -478,11 +490,14 @@ func maybeSkipArchive(env build.Environment) { // Debian Packaging func doDebianSource(cmdline []string) { var ( - signer = flag.String("signer", "", `Signing key name, also used as package author`) - upload = flag.String("upload", "", `Where to upload the source package (usually "ethereum/ethereum")`) - sshUser = flag.String("sftp-user", "", `Username for SFTP upload (usually "geth-ci")`) - workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`) - now = time.Now() + goversion = flag.String("goversion", "", `Go version to build with (will be included in the source package)`) + gobundle = flag.String("gobundle", "/tmp/go.tar.gz", `Filesystem path to cache the downloaded Go bundles at`) + gohash = flag.String("gohash", "", `SHA256 checksum of the Go sources requested to build with`) + signer = flag.String("signer", "", `Signing key name, also used as package author`) + upload = flag.String("upload", "", `Where to upload the source package (usually "ethereum/ethereum")`) + sshUser = flag.String("sftp-user", "", `Username for SFTP upload (usually "geth-ci")`) + workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`) + now = time.Now() ) flag.CommandLine.Parse(cmdline) *workdir = makeWorkdir(*workdir) @@ -495,12 +510,25 @@ func doDebianSource(cmdline []string) { gpg.Stdin = bytes.NewReader(key) build.MustRun(gpg) } - + // Download and verify the Go source package + if err := build.EnsureGoSources(*goversion, hexutil.MustDecode("0x"+*gohash), *gobundle); err != nil { + log.Fatalf("Failed to ensure Go source package: %v", err) + } // Create Debian packages and upload them for _, pkg := range debPackages { - for _, distro := range debDistros { - meta := newDebMetadata(distro, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables) + for distro, goboot := range debDistroGoBoots { + // Prepare the debian package with the go-ethereum sources + meta := newDebMetadata(distro, goboot, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables) pkgdir := stageDebianSource(*workdir, meta) + + // Ship the Go sources along so we have a proper thing to build with + if err := build.ExtractTarballArchive(*gobundle, pkgdir); err != nil { + log.Fatalf("Failed to extract Go sources: %v", err) + } + if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".go")); err != nil { + log.Fatalf("Failed to rename Go source folder: %v", err) + } + // Run the packaging and upload to the PPA debuild := exec.Command("debuild", "-S", "-sa", "-us", "-uc", "-d", "-Zxz") debuild.Dir = pkgdir build.MustRun(debuild) @@ -580,7 +608,9 @@ type debPackage struct { } type debMetadata struct { - Env build.Environment + Env build.Environment + GoBootPackage string + GoBootPath string PackageName string @@ -609,19 +639,21 @@ func (d debExecutable) Package() string { return d.BinaryName } -func newDebMetadata(distro, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata { +func newDebMetadata(distro, goboot, author string, env build.Environment, t time.Time, name 
string, version string, exes []debExecutable) debMetadata { if author == "" { // No signing key, use default author. author = "Ethereum Builds " } return debMetadata{ - PackageName: name, - Env: env, - Author: author, - Distro: distro, - Version: version, - Time: t.Format(time.RFC1123Z), - Executables: exes, + GoBootPackage: goboot, + GoBootPath: debGoBootPaths[goboot], + PackageName: name, + Env: env, + Author: author, + Distro: distro, + Version: version, + Time: t.Format(time.RFC1123Z), + Executables: exes, } } @@ -686,7 +718,6 @@ func stageDebianSource(tmpdir string, meta debMetadata) (pkgdir string) { if err := os.Mkdir(pkgdir, 0755); err != nil { log.Fatal(err) } - // Copy the source code. build.MustRunCommand("git", "checkout-index", "-a", "--prefix", pkgdir+string(filepath.Separator)) @@ -704,7 +735,6 @@ func stageDebianSource(tmpdir string, meta debMetadata) (pkgdir string) { build.Render("build/deb/"+meta.PackageName+"/deb.install", install, 0644, exe) build.Render("build/deb/"+meta.PackageName+"/deb.docs", docs, 0644, exe) } - return pkgdir } diff --git a/build/deb/ethereum/deb.control b/build/deb/ethereum/deb.control index 74e1cf1e0..d7b4d51fd 100644 --- a/build/deb/ethereum/deb.control +++ b/build/deb/ethereum/deb.control @@ -2,7 +2,7 @@ Source: {{.Name}} Section: science Priority: extra Maintainer: {{.Author}} -Build-Depends: debhelper (>= 8.0.0), golang-1.11 +Build-Depends: debhelper (>= 8.0.0), {{.GoBootPackage}} Standards-Version: 3.9.5 Homepage: https://ethereum.org Vcs-Git: git://github.com/ethersocial/go-ethersocial.git diff --git a/build/deb/ethereum/deb.rules b/build/deb/ethereum/deb.rules index 5280e0e55..1370a52f1 100644 --- a/build/deb/ethereum/deb.rules +++ b/build/deb/ethereum/deb.rules @@ -6,9 +6,11 @@ # Launchpad rejects Go's access to $HOME/.cache, use custom folder export GOCACHE=/tmp/go-build +export GOROOT_BOOTSTRAP={{.GoBootPath}} override_dh_auto_build: - build/env.sh /usr/lib/go-1.11/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}} + (cd .go/src && ./make.bash) + build/env.sh .go/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}} override_dh_auto_test: diff --git a/cmd/clef/main.go b/cmd/clef/main.go index 372e9d509..2ed828f27 100644 --- a/cmd/clef/main.go +++ b/cmd/clef/main.go @@ -404,6 +404,27 @@ func initialize(c *cli.Context) error { return nil } +// ipcEndpoint resolves an IPC endpoint based on a configured value, taking into +// account the set data folders as well as the designated platform we're currently +// running on. 
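The resolution rules described in the comment above are easy to get wrong across platforms, so here is a minimal stand-alone sketch of the same logic using only the standard library (the helper name `resolveIPC` and the `main` driver are illustrative; the helper actually added by this change is `ipcEndpoint` below):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strings"
)

// resolveIPC restates the rules from the comment above: on Windows only
// top-level named pipes are usable, elsewhere a bare file name is placed
// inside the data directory (or the OS temp dir when no datadir is set),
// and anything containing a directory part is used verbatim.
func resolveIPC(ipcPath, datadir string) string {
	if runtime.GOOS == "windows" {
		if strings.HasPrefix(ipcPath, `\\.\pipe\`) {
			return ipcPath
		}
		return `\\.\pipe\` + ipcPath
	}
	if filepath.Base(ipcPath) == ipcPath { // bare name, no directory part
		if datadir == "" {
			return filepath.Join(os.TempDir(), ipcPath)
		}
		return filepath.Join(datadir, ipcPath)
	}
	return ipcPath
}

func main() {
	// On a unix-like host this prints /home/user/.clef/clef.ipc.
	fmt.Println(resolveIPC("clef.ipc", "/home/user/.clef"))
	// Explicit paths are returned unchanged.
	fmt.Println(resolveIPC("/var/run/clef.ipc", "/home/user/.clef"))
}
```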
+func ipcEndpoint(ipcPath, datadir string) string { + // On windows we can only use plain top-level pipes + if runtime.GOOS == "windows" { + if strings.HasPrefix(ipcPath, `\\.\pipe\`) { + return ipcPath + } + return `\\.\pipe\` + ipcPath + } + // Resolve names into the data directory full paths otherwise + if filepath.Base(ipcPath) == ipcPath { + if datadir == "" { + return filepath.Join(os.TempDir(), ipcPath) + } + return filepath.Join(datadir, ipcPath) + } + return ipcPath +} + func signer(c *cli.Context) error { // If we have some unrecognized command, bail out if args := c.Args(); len(args) > 0 { @@ -532,12 +553,8 @@ func signer(c *cli.Context) error { }() } if !c.GlobalBool(utils.IPCDisabledFlag.Name) { - if c.IsSet(utils.IPCPathFlag.Name) { - ipcapiURL = c.GlobalString(utils.IPCPathFlag.Name) - } else { - ipcapiURL = filepath.Join(configDir, "clef.ipc") - } - + givenPath := c.GlobalString(utils.IPCPathFlag.Name) + ipcapiURL = ipcEndpoint(filepath.Join(givenPath, "clef.ipc"), configDir) listener, _, err := rpc.StartIPCEndpoint(ipcapiURL, rpcAPI) if err != nil { utils.Fatalf("Could not start IPC api: %v", err) @@ -547,7 +564,6 @@ func signer(c *cli.Context) error { listener.Close() log.Info("IPC endpoint closed", "url", ipcapiURL) }() - } if c.GlobalBool(testFlag.Name) { diff --git a/cmd/devp2p/crawl.go b/cmd/devp2p/crawl.go new file mode 100644 index 000000000..272ae462d --- /dev/null +++ b/cmd/devp2p/crawl.go @@ -0,0 +1,152 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "time" + + "github.com/ethersocial/go-ethersocial/log" + "github.com/ethersocial/go-ethersocial/p2p/discover" + "github.com/ethersocial/go-ethersocial/p2p/enode" +) + +type crawler struct { + input nodeSet + output nodeSet + disc *discover.UDPv4 + iters []enode.Iterator + inputIter enode.Iterator + ch chan *enode.Node + closed chan struct{} + + // settings + revalidateInterval time.Duration +} + +func newCrawler(input nodeSet, disc *discover.UDPv4, iters ...enode.Iterator) *crawler { + c := &crawler{ + input: input, + output: make(nodeSet, len(input)), + disc: disc, + iters: iters, + inputIter: enode.IterNodes(input.nodes()), + ch: make(chan *enode.Node), + closed: make(chan struct{}), + } + c.iters = append(c.iters, c.inputIter) + // Copy input to output initially. Any nodes that fail validation + // will be dropped from output during the run. 
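The crawler tracks node liveness with a small integer score (see `updateNode` further down): every successful ENR request adds one, every failure roughly halves the score, and a node is removed from the output set once its score reaches zero. A tiny, purely hypothetical trace of that scheme:

```go
package main

import "fmt"

// Simulate the score updates applied by the crawler: +1 on a successful
// check, integer halving on a failed one, drop at zero. The sequence of
// check results below is made up for illustration only.
func main() {
	score := 0
	for _, ok := range []bool{true, true, true, false, false, false} {
		if ok {
			score++
		} else {
			score /= 2
		}
		fmt.Printf("check ok=%v -> score=%d dropped=%v\n", ok, score, score <= 0)
	}
}
```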
+ for id, n := range input { + c.output[id] = n + } + return c +} + +func (c *crawler) run(timeout time.Duration) nodeSet { + var ( + timeoutTimer = time.NewTimer(timeout) + timeoutCh <-chan time.Time + doneCh = make(chan enode.Iterator, len(c.iters)) + liveIters = len(c.iters) + ) + for _, it := range c.iters { + go c.runIterator(doneCh, it) + } + +loop: + for { + select { + case n := <-c.ch: + c.updateNode(n) + case it := <-doneCh: + if it == c.inputIter { + // Enable timeout when we're done revalidating the input nodes. + log.Info("Revalidation of input set is done", "len", len(c.input)) + if timeout > 0 { + timeoutCh = timeoutTimer.C + } + } + if liveIters--; liveIters == 0 { + break loop + } + case <-timeoutCh: + break loop + } + } + + close(c.closed) + for _, it := range c.iters { + it.Close() + } + for ; liveIters > 0; liveIters-- { + <-doneCh + } + return c.output +} + +func (c *crawler) runIterator(done chan<- enode.Iterator, it enode.Iterator) { + defer func() { done <- it }() + for it.Next() { + select { + case c.ch <- it.Node(): + case <-c.closed: + return + } + } +} + +func (c *crawler) updateNode(n *enode.Node) { + node, ok := c.output[n.ID()] + + // Skip validation of recently-seen nodes. + if ok && time.Since(node.LastCheck) < c.revalidateInterval { + return + } + + // Request the node record. + nn, err := c.disc.RequestENR(n) + node.LastCheck = truncNow() + if err != nil { + if node.Score == 0 { + // Node doesn't implement EIP-868. + log.Debug("Skipping node", "id", n.ID()) + return + } + node.Score /= 2 + } else { + node.N = nn + node.Seq = nn.Seq() + node.Score++ + if node.FirstResponse.IsZero() { + node.FirstResponse = node.LastCheck + } + node.LastResponse = node.LastCheck + } + + // Store/update node in output set. + if node.Score <= 0 { + log.Info("Removing node", "id", n.ID()) + delete(c.output, n.ID()) + } else { + log.Info("Updating node", "id", n.ID(), "seq", n.Seq(), "score", node.Score) + c.output[n.ID()] = node + } +} + +func truncNow() time.Time { + return time.Now().UTC().Truncate(1 * time.Second) +} diff --git a/cmd/devp2p/discv4cmd.go b/cmd/devp2p/discv4cmd.go index bfdb67b14..217693bd1 100644 --- a/cmd/devp2p/discv4cmd.go +++ b/cmd/devp2p/discv4cmd.go @@ -39,6 +39,7 @@ var ( discv4RequestRecordCommand, discv4ResolveCommand, discv4ResolveJSONCommand, + discv4CrawlCommand, }, } discv4PingCommand = cli.Command{ @@ -67,12 +68,25 @@ var ( Flags: []cli.Flag{bootnodesFlag}, ArgsUsage: "", } + discv4CrawlCommand = cli.Command{ + Name: "crawl", + Usage: "Updates a nodes.json file with random nodes found in the DHT", + Action: discv4Crawl, + Flags: []cli.Flag{bootnodesFlag, crawlTimeoutFlag}, + } ) -var bootnodesFlag = cli.StringFlag{ - Name: "bootnodes", - Usage: "Comma separated nodes used for bootstrapping", -} +var ( + bootnodesFlag = cli.StringFlag{ + Name: "bootnodes", + Usage: "Comma separated nodes used for bootstrapping", + } + crawlTimeoutFlag = cli.DurationFlag{ + Name: "timeout", + Usage: "Time limit for the crawl.", + Value: 30 * time.Minute, + } +) func discv4Ping(ctx *cli.Context) error { n := getNodeArg(ctx) @@ -113,30 +127,48 @@ func discv4ResolveJSON(ctx *cli.Context) error { if ctx.NArg() < 1 { return fmt.Errorf("need nodes file as argument") } - disc := startV4(ctx) - defer disc.Close() - file := ctx.Args().Get(0) - - // Load existing nodes in file. 
- var nodes []*enode.Node - if common.FileExist(file) { - nodes = loadNodesJSON(file).nodes() + nodesFile := ctx.Args().Get(0) + inputSet := make(nodeSet) + if common.FileExist(nodesFile) { + inputSet = loadNodesJSON(nodesFile) } - // Add nodes from command line arguments. + + // Add extra nodes from command line arguments. + var nodeargs []*enode.Node for i := 1; i < ctx.NArg(); i++ { n, err := parseNode(ctx.Args().Get(i)) if err != nil { exit(err) } - nodes = append(nodes, n) + nodeargs = append(nodeargs, n) } - result := make(nodeSet, len(nodes)) - for _, n := range nodes { - n = disc.Resolve(n) - result[n.ID()] = nodeJSON{Seq: n.Seq(), N: n} + // Run the crawler. + disc := startV4(ctx) + defer disc.Close() + c := newCrawler(inputSet, disc, enode.IterNodes(nodeargs)) + c.revalidateInterval = 0 + output := c.run(0) + writeNodesJSON(nodesFile, output) + return nil +} + +func discv4Crawl(ctx *cli.Context) error { + if ctx.NArg() < 1 { + return fmt.Errorf("need nodes file as argument") + } + nodesFile := ctx.Args().First() + var inputSet nodeSet + if common.FileExist(nodesFile) { + inputSet = loadNodesJSON(nodesFile) } - writeNodesJSON(file, result) + + disc := startV4(ctx) + defer disc.Close() + c := newCrawler(inputSet, disc, disc.RandomNodes()) + c.revalidateInterval = 10 * time.Minute + output := c.run(ctx.Duration(crawlTimeoutFlag.Name)) + writeNodesJSON(nodesFile, output) return nil } diff --git a/cmd/devp2p/dnscmd.go b/cmd/devp2p/dnscmd.go index ee8c0bc2f..79e8b5939 100644 --- a/cmd/devp2p/dnscmd.go +++ b/cmd/devp2p/dnscmd.go @@ -109,7 +109,8 @@ func dnsSync(ctx *cli.Context) error { } def := treeToDefinition(url, t) def.Meta.LastModified = time.Now() - writeTreeDefinition(outdir, def) + writeTreeMetadata(outdir, def) + writeTreeNodes(outdir, def) return nil } @@ -151,7 +152,7 @@ func dnsSign(ctx *cli.Context) error { def = treeToDefinition(url, t) def.Meta.LastModified = time.Now() - writeTreeDefinition(defdir, def) + writeTreeMetadata(defdir, def) return nil } @@ -315,26 +316,28 @@ func ensureValidTreeSignature(t *dnsdisc.Tree, pubkey *ecdsa.PublicKey, sig stri return nil } -// writeTreeDefinition writes a DNS node tree definition to the given directory. -func writeTreeDefinition(directory string, def *dnsDefinition) { +// writeTreeMetadata writes a DNS node tree metadata file to the given directory. +func writeTreeMetadata(directory string, def *dnsDefinition) { metaJSON, err := json.MarshalIndent(&def.Meta, "", jsonIndent) if err != nil { exit(err) } - // Convert nodes. - nodes := make(nodeSet, len(def.Nodes)) - nodes.add(def.Nodes...) - // Write. if err := os.Mkdir(directory, 0744); err != nil && !os.IsExist(err) { exit(err) } - metaFile, nodesFile := treeDefinitionFiles(directory) - writeNodesJSON(nodesFile, nodes) + metaFile, _ := treeDefinitionFiles(directory) if err := ioutil.WriteFile(metaFile, metaJSON, 0644); err != nil { exit(err) } } +func writeTreeNodes(directory string, def *dnsDefinition) { + ns := make(nodeSet, len(def.Nodes)) + ns.add(def.Nodes...) 
+ _, nodesFile := treeDefinitionFiles(directory) + writeNodesJSON(nodesFile, ns) +} + func treeDefinitionFiles(directory string) (string, string) { meta := filepath.Join(directory, "enrtree-info.json") nodes := filepath.Join(directory, "nodes.json") diff --git a/cmd/devp2p/main.go b/cmd/devp2p/main.go index 20e6eebbb..6acc210c6 100644 --- a/cmd/devp2p/main.go +++ b/cmd/devp2p/main.go @@ -60,6 +60,7 @@ func init() { enrdumpCommand, discv4Command, dnsCommand, + nodesetCommand, } } diff --git a/cmd/devp2p/nodeset.go b/cmd/devp2p/nodeset.go index e9c50631a..326b921b5 100644 --- a/cmd/devp2p/nodeset.go +++ b/cmd/devp2p/nodeset.go @@ -21,7 +21,9 @@ import ( "encoding/json" "fmt" "io/ioutil" + "os" "sort" + "time" "github.com/ethersocial/go-ethersocial/common" "github.com/ethersocial/go-ethersocial/p2p/enode" @@ -36,6 +38,15 @@ type nodeSet map[enode.ID]nodeJSON type nodeJSON struct { Seq uint64 `json:"seq"` N *enode.Node `json:"record"` + + // The score tracks how many liveness checks were performed. It is incremented by one + // every time the node passes a check, and halved every time it doesn't. + Score int `json:"score,omitempty"` + // These two track the time of last successful contact. + FirstResponse time.Time `json:"firstResponse,omitempty"` + LastResponse time.Time `json:"lastResponse,omitempty"` + // This one tracks the time of our last attempt to contact the node. + LastCheck time.Time `json:"lastCheck,omitempty"` } func loadNodesJSON(file string) nodeSet { @@ -51,6 +62,10 @@ func writeNodesJSON(file string, nodes nodeSet) { if err != nil { exit(err) } + if file == "-" { + os.Stdout.Write(nodesJSON) + return + } if err := ioutil.WriteFile(file, nodesJSON, 0644); err != nil { exit(err) } diff --git a/cmd/devp2p/nodesetcmd.go b/cmd/devp2p/nodesetcmd.go new file mode 100644 index 000000000..79a4f2338 --- /dev/null +++ b/cmd/devp2p/nodesetcmd.go @@ -0,0 +1,193 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
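The `nodeset filter` command added in this file composes per-node predicates with a logical AND (`andFilter` below), each predicate parsed from CLI-style arguments. A simplified, self-contained sketch of that combinator pattern, using hypothetical stand-in types rather than the devp2p ones:

```go
package main

import "fmt"

// record is a toy stand-in for the nodeJSON entries being filtered.
type record struct {
	IP    string
	Score int
}

// filter mirrors the nodeFilter predicate shape used by the command below.
type filter func(record) bool

// and returns a filter that passes only when every component filter passes.
func and(filters ...filter) filter {
	return func(r record) bool {
		for _, f := range filters {
			if !f(r) {
				return false
			}
		}
		return true
	}
}

func main() {
	alive := func(r record) bool { return r.Score > 0 }
	local := func(r record) bool { return r.IP == "127.0.0.1" }
	keep := and(alive, local)

	fmt.Println(keep(record{IP: "127.0.0.1", Score: 3})) // true
	fmt.Println(keep(record{IP: "10.0.0.1", Score: 3}))  // false
}
```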
+ +package main + +import ( + "fmt" + "net" + "time" + + "github.com/ethersocial/go-ethersocial/core/forkid" + "github.com/ethersocial/go-ethersocial/p2p/enr" + "github.com/ethersocial/go-ethersocial/params" + "github.com/ethersocial/go-ethersocial/rlp" + "gopkg.in/urfave/cli.v1" +) + +var ( + nodesetCommand = cli.Command{ + Name: "nodeset", + Usage: "Node set tools", + Subcommands: []cli.Command{ + nodesetInfoCommand, + nodesetFilterCommand, + }, + } + nodesetInfoCommand = cli.Command{ + Name: "info", + Usage: "Shows statistics about a node set", + Action: nodesetInfo, + ArgsUsage: "", + } + nodesetFilterCommand = cli.Command{ + Name: "filter", + Usage: "Filters a node set", + Action: nodesetFilter, + ArgsUsage: " filters..", + + SkipFlagParsing: true, + } +) + +func nodesetInfo(ctx *cli.Context) error { + if ctx.NArg() < 1 { + return fmt.Errorf("need nodes file as argument") + } + + ns := loadNodesJSON(ctx.Args().First()) + fmt.Printf("Set contains %d nodes.\n", len(ns)) + return nil +} + +func nodesetFilter(ctx *cli.Context) error { + if ctx.NArg() < 1 { + return fmt.Errorf("need nodes file as argument") + } + ns := loadNodesJSON(ctx.Args().First()) + filter, err := andFilter(ctx.Args().Tail()) + if err != nil { + return err + } + + result := make(nodeSet) + for id, n := range ns { + if filter(n) { + result[id] = n + } + } + writeNodesJSON("-", result) + return nil +} + +type nodeFilter func(nodeJSON) bool + +type nodeFilterC struct { + narg int + fn func([]string) (nodeFilter, error) +} + +var filterFlags = map[string]nodeFilterC{ + "-ip": {1, ipFilter}, + "-min-age": {1, minAgeFilter}, + "-eth-network": {1, ethFilter}, + "-les-server": {0, lesFilter}, +} + +func parseFilters(args []string) ([]nodeFilter, error) { + var filters []nodeFilter + for len(args) > 0 { + fc, ok := filterFlags[args[0]] + if !ok { + return nil, fmt.Errorf("invalid filter %q", args[0]) + } + if len(args) < fc.narg { + return nil, fmt.Errorf("filter %q wants %d arguments, have %d", args[0], fc.narg, len(args)) + } + filter, err := fc.fn(args[1:]) + if err != nil { + return nil, fmt.Errorf("%s: %v", args[0], err) + } + filters = append(filters, filter) + args = args[fc.narg+1:] + } + return filters, nil +} + +func andFilter(args []string) (nodeFilter, error) { + checks, err := parseFilters(args) + if err != nil { + return nil, err + } + f := func(n nodeJSON) bool { + for _, filter := range checks { + if !filter(n) { + return false + } + } + return true + } + return f, nil +} + +func ipFilter(args []string) (nodeFilter, error) { + _, cidr, err := net.ParseCIDR(args[0]) + if err != nil { + return nil, err + } + f := func(n nodeJSON) bool { return cidr.Contains(n.N.IP()) } + return f, nil +} + +func minAgeFilter(args []string) (nodeFilter, error) { + minage, err := time.ParseDuration(args[0]) + if err != nil { + return nil, err + } + f := func(n nodeJSON) bool { + age := n.LastResponse.Sub(n.FirstResponse) + return age >= minage + } + return f, nil +} + +func ethFilter(args []string) (nodeFilter, error) { + var filter forkid.Filter + switch args[0] { + case "mainnet": + filter = forkid.NewStaticFilter(params.MainnetChainConfig, params.MainnetGenesisHash) + case "rinkeby": + filter = forkid.NewStaticFilter(params.RinkebyChainConfig, params.RinkebyGenesisHash) + case "goerli": + filter = forkid.NewStaticFilter(params.GoerliChainConfig, params.GoerliGenesisHash) + case "ropsten": + filter = forkid.NewStaticFilter(params.TestnetChainConfig, params.TestnetGenesisHash) + default: + return nil, fmt.Errorf("unknown network 
%q", args[0]) + } + + f := func(n nodeJSON) bool { + var eth struct { + ForkID forkid.ID + _ []rlp.RawValue `rlp:"tail"` + } + if n.N.Load(enr.WithEntry("eth", ð)) != nil { + return false + } + return filter(eth.ForkID) == nil + } + return f, nil +} + +func lesFilter(args []string) (nodeFilter, error) { + f := func(n nodeJSON) bool { + var les struct { + _ []rlp.RawValue `rlp:"tail"` + } + return n.N.Load(enr.WithEntry("les", &les)) == nil + } + return f, nil +} diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index 88f2c9fb6..cb5ca3ddb 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -17,6 +17,7 @@ package main import ( + "bytes" "encoding/json" "fmt" "io/ioutil" @@ -145,6 +146,7 @@ func runCmd(ctx *cli.Context) error { } else { hexcode = []byte(codeFlag) } + hexcode = bytes.TrimSpace(hexcode) if len(hexcode)%2 != 0 { fmt.Printf("Invalid input length for hex data (%d)\n", len(hexcode)) os.Exit(1) diff --git a/cmd/puppeth/genesis.go b/cmd/puppeth/genesis.go index 031273a9a..9ecf07cbb 100644 --- a/cmd/puppeth/genesis.go +++ b/cmd/puppeth/genesis.go @@ -36,25 +36,27 @@ import ( type alethGenesisSpec struct { SealEngine string `json:"sealEngine"` Params struct { - AccountStartNonce math2.HexOrDecimal64 `json:"accountStartNonce"` - MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"` - HomesteadForkBlock hexutil.Uint64 `json:"homesteadForkBlock"` - DaoHardforkBlock math2.HexOrDecimal64 `json:"daoHardforkBlock"` - EIP150ForkBlock hexutil.Uint64 `json:"EIP150ForkBlock"` - EIP158ForkBlock hexutil.Uint64 `json:"EIP158ForkBlock"` - ByzantiumForkBlock hexutil.Uint64 `json:"byzantiumForkBlock"` - ConstantinopleForkBlock hexutil.Uint64 `json:"constantinopleForkBlock"` - MinGasLimit hexutil.Uint64 `json:"minGasLimit"` - MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"` - TieBreakingGas bool `json:"tieBreakingGas"` - GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"` - MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"` - DifficultyBoundDivisor *math2.HexOrDecimal256 `json:"difficultyBoundDivisor"` - DurationLimit *math2.HexOrDecimal256 `json:"durationLimit"` - BlockReward *hexutil.Big `json:"blockReward"` - NetworkID hexutil.Uint64 `json:"networkID"` - ChainID hexutil.Uint64 `json:"chainID"` - AllowFutureBlocks bool `json:"allowFutureBlocks"` + AccountStartNonce math2.HexOrDecimal64 `json:"accountStartNonce"` + MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"` + HomesteadForkBlock *hexutil.Big `json:"homesteadForkBlock,omitempty"` + DaoHardforkBlock math2.HexOrDecimal64 `json:"daoHardforkBlock"` + EIP150ForkBlock *hexutil.Big `json:"EIP150ForkBlock,omitempty"` + EIP158ForkBlock *hexutil.Big `json:"EIP158ForkBlock,omitempty"` + ByzantiumForkBlock *hexutil.Big `json:"byzantiumForkBlock,omitempty"` + ConstantinopleForkBlock *hexutil.Big `json:"constantinopleForkBlock,omitempty"` + ConstantinopleFixForkBlock *hexutil.Big `json:"constantinopleFixForkBlock,omitempty"` + IstanbulForkBlock *hexutil.Big `json:"istanbulForkBlock,omitempty"` + MinGasLimit hexutil.Uint64 `json:"minGasLimit"` + MaxGasLimit hexutil.Uint64 `json:"maxGasLimit"` + TieBreakingGas bool `json:"tieBreakingGas"` + GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"` + MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"` + DifficultyBoundDivisor *math2.HexOrDecimal256 `json:"difficultyBoundDivisor"` + DurationLimit *math2.HexOrDecimal256 `json:"durationLimit"` + BlockReward *hexutil.Big `json:"blockReward"` + NetworkID hexutil.Uint64 `json:"networkID"` + 
ChainID hexutil.Uint64 `json:"chainID"` + AllowFutureBlocks bool `json:"allowFutureBlocks"` } `json:"params"` Genesis struct { @@ -74,7 +76,7 @@ type alethGenesisSpec struct { // alethGenesisSpecAccount is the prefunded genesis account and/or precompiled // contract definition. type alethGenesisSpecAccount struct { - Balance *math2.HexOrDecimal256 `json:"balance"` + Balance *math2.HexOrDecimal256 `json:"balance,omitempty"` Nonce uint64 `json:"nonce,omitempty"` Precompiled *alethGenesisSpecBuiltin `json:"precompiled,omitempty"` } @@ -82,7 +84,7 @@ type alethGenesisSpecAccount struct { // alethGenesisSpecBuiltin is the precompiled contract definition. type alethGenesisSpecBuiltin struct { Name string `json:"name,omitempty"` - StartingBlock hexutil.Uint64 `json:"startingBlock,omitempty"` + StartingBlock *hexutil.Big `json:"startingBlock,omitempty"` Linear *alethGenesisSpecLinearPricing `json:"linear,omitempty"` } @@ -106,21 +108,33 @@ func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSp spec.Params.AccountStartNonce = 0 spec.Params.TieBreakingGas = false spec.Params.AllowFutureBlocks = false - spec.Params.DaoHardforkBlock = 0 - spec.Params.HomesteadForkBlock = (hexutil.Uint64)(genesis.Config.HomesteadBlock.Uint64()) - spec.Params.EIP150ForkBlock = (hexutil.Uint64)(genesis.Config.EIP150Block.Uint64()) - spec.Params.EIP158ForkBlock = (hexutil.Uint64)(genesis.Config.EIP158Block.Uint64()) + // Dao hardfork block is a special one. The fork block is listed as 0 in the + // config but aleth will sync with ETC clients up until the actual dao hard + // fork block. + spec.Params.DaoHardforkBlock = 0 - // Byzantium + if num := genesis.Config.HomesteadBlock; num != nil { + spec.Params.HomesteadForkBlock = (*hexutil.Big)(num) + } + if num := genesis.Config.EIP150Block; num != nil { + spec.Params.EIP150ForkBlock = (*hexutil.Big)(num) + } + if num := genesis.Config.EIP158Block; num != nil { + spec.Params.EIP158ForkBlock = (*hexutil.Big)(num) + } if num := genesis.Config.ByzantiumBlock; num != nil { - spec.setByzantium(num) + spec.Params.ByzantiumForkBlock = (*hexutil.Big)(num) } - // Constantinople if num := genesis.Config.ConstantinopleBlock; num != nil { - spec.setConstantinople(num) + spec.Params.ConstantinopleForkBlock = (*hexutil.Big)(num) + } + if num := genesis.Config.PetersburgBlock; num != nil { + spec.Params.ConstantinopleFixForkBlock = (*hexutil.Big)(num) + } + if num := genesis.Config.IstanbulBlock; num != nil { + spec.Params.IstanbulForkBlock = (*hexutil.Big)(num) } - spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64()) spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64()) spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize) @@ -157,15 +171,32 @@ func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSp Linear: &alethGenesisSpecLinearPricing{Base: 15, Word: 3}}) if genesis.Config.ByzantiumBlock != nil { spec.setPrecompile(5, &alethGenesisSpecBuiltin{Name: "modexp", - StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())}) + StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock)}) spec.setPrecompile(6, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_add", - StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), + StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock), Linear: &alethGenesisSpecLinearPricing{Base: 500}}) spec.setPrecompile(7, &alethGenesisSpecBuiltin{Name: "alt_bn128_G1_mul", - StartingBlock: 
(hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()), + StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock), Linear: &alethGenesisSpecLinearPricing{Base: 40000}}) spec.setPrecompile(8, &alethGenesisSpecBuiltin{Name: "alt_bn128_pairing_product", - StartingBlock: (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64())}) + StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock)}) + } + if genesis.Config.IstanbulBlock != nil { + if genesis.Config.ByzantiumBlock == nil { + return nil, errors.New("invalid genesis, istanbul fork is enabled while byzantium is not") + } + spec.setPrecompile(6, &alethGenesisSpecBuiltin{ + Name: "alt_bn128_G1_add", + StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock), + }) // Aleth hardcoded the gas policy + spec.setPrecompile(7, &alethGenesisSpecBuiltin{ + Name: "alt_bn128_G1_mul", + StartingBlock: (*hexutil.Big)(genesis.Config.ByzantiumBlock), + }) // Aleth hardcoded the gas policy + spec.setPrecompile(9, &alethGenesisSpecBuiltin{ + Name: "blake2_compression", + StartingBlock: (*hexutil.Big)(genesis.Config.IstanbulBlock), + }) } return spec, nil } @@ -196,14 +227,6 @@ func (spec *alethGenesisSpec) setAccount(address common.Address, account core.Ge } -func (spec *alethGenesisSpec) setByzantium(num *big.Int) { - spec.Params.ByzantiumForkBlock = hexutil.Uint64(num.Uint64()) -} - -func (spec *alethGenesisSpec) setConstantinople(num *big.Int) { - spec.Params.ConstantinopleForkBlock = hexutil.Uint64(num.Uint64()) -} - // parityChainSpec is the chain specification format used by Parity. type parityChainSpec struct { Name string `json:"name"` @@ -223,29 +246,33 @@ type parityChainSpec struct { } `json:"engine"` Params struct { - AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"` - MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"` - MinGasLimit hexutil.Uint64 `json:"minGasLimit"` - GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"` - NetworkID hexutil.Uint64 `json:"networkID"` - ChainID hexutil.Uint64 `json:"chainID"` - MaxCodeSize hexutil.Uint64 `json:"maxCodeSize"` - MaxCodeSizeTransition hexutil.Uint64 `json:"maxCodeSizeTransition"` - EIP98Transition hexutil.Uint64 `json:"eip98Transition"` - EIP150Transition hexutil.Uint64 `json:"eip150Transition"` - EIP160Transition hexutil.Uint64 `json:"eip160Transition"` - EIP161abcTransition hexutil.Uint64 `json:"eip161abcTransition"` - EIP161dTransition hexutil.Uint64 `json:"eip161dTransition"` - EIP155Transition hexutil.Uint64 `json:"eip155Transition"` - EIP140Transition hexutil.Uint64 `json:"eip140Transition"` - EIP211Transition hexutil.Uint64 `json:"eip211Transition"` - EIP214Transition hexutil.Uint64 `json:"eip214Transition"` - EIP658Transition hexutil.Uint64 `json:"eip658Transition"` - EIP145Transition hexutil.Uint64 `json:"eip145Transition"` - EIP1014Transition hexutil.Uint64 `json:"eip1014Transition"` - EIP1052Transition hexutil.Uint64 `json:"eip1052Transition"` - EIP1283Transition hexutil.Uint64 `json:"eip1283Transition"` - EIP1283DisableTransition hexutil.Uint64 `json:"eip1283DisableTransition"` + AccountStartNonce hexutil.Uint64 `json:"accountStartNonce"` + MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"` + MinGasLimit hexutil.Uint64 `json:"minGasLimit"` + GasLimitBoundDivisor math2.HexOrDecimal64 `json:"gasLimitBoundDivisor"` + NetworkID hexutil.Uint64 `json:"networkID"` + ChainID hexutil.Uint64 `json:"chainID"` + MaxCodeSize hexutil.Uint64 `json:"maxCodeSize"` + MaxCodeSizeTransition hexutil.Uint64 `json:"maxCodeSizeTransition"` + 
EIP98Transition hexutil.Uint64 `json:"eip98Transition"` + EIP150Transition hexutil.Uint64 `json:"eip150Transition"` + EIP160Transition hexutil.Uint64 `json:"eip160Transition"` + EIP161abcTransition hexutil.Uint64 `json:"eip161abcTransition"` + EIP161dTransition hexutil.Uint64 `json:"eip161dTransition"` + EIP155Transition hexutil.Uint64 `json:"eip155Transition"` + EIP140Transition hexutil.Uint64 `json:"eip140Transition"` + EIP211Transition hexutil.Uint64 `json:"eip211Transition"` + EIP214Transition hexutil.Uint64 `json:"eip214Transition"` + EIP658Transition hexutil.Uint64 `json:"eip658Transition"` + EIP145Transition hexutil.Uint64 `json:"eip145Transition"` + EIP1014Transition hexutil.Uint64 `json:"eip1014Transition"` + EIP1052Transition hexutil.Uint64 `json:"eip1052Transition"` + EIP1283Transition hexutil.Uint64 `json:"eip1283Transition"` + EIP1283DisableTransition hexutil.Uint64 `json:"eip1283DisableTransition"` + EIP1283ReenableTransition hexutil.Uint64 `json:"eip1283ReenableTransition"` + EIP1344Transition hexutil.Uint64 `json:"eip1344Transition"` + EIP1884Transition hexutil.Uint64 `json:"eip1884Transition"` + EIP2028Transition hexutil.Uint64 `json:"eip2028Transition"` } `json:"params"` Genesis struct { @@ -278,17 +305,22 @@ type parityChainSpecAccount struct { // parityChainSpecBuiltin is the precompiled contract definition. type parityChainSpecBuiltin struct { - Name string `json:"name,omitempty"` - ActivateAt math2.HexOrDecimal64 `json:"activate_at,omitempty"` - Pricing *parityChainSpecPricing `json:"pricing,omitempty"` + Name string `json:"name"` // Each builtin should has it own name + Pricing *parityChainSpecPricing `json:"pricing"` // Each builtin should has it own price strategy + ActivateAt *hexutil.Big `json:"activate_at,omitempty"` // ActivateAt can't be omitted if empty, default means no fork + EIP1108Transition *hexutil.Big `json:"eip1108_transition,omitempty"` // EIP1108Transition can't be omitted if empty, default means no fork } // parityChainSpecPricing represents the different pricing models that builtin // contracts might advertise using. 
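Before the struct definitions, a quick illustration of the JSON these pricing models produce. The stand-in types below reuse the same JSON tags as the fields added in this change (they are not the actual puppeth types): a pre-Istanbul spec omits the EIP-1108 repricing field thanks to `omitempty`, while an Istanbul-enabled spec carries both the original and the repriced values.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// constOpPricing mirrors parityChainSpecAltBnConstOperationPricing's tags.
type constOpPricing struct {
	Price                  uint64 `json:"price"`
	EIP1108TransitionPrice uint64 `json:"eip1108_transition_price,omitempty"`
}

// pricing mirrors the alt_bn128_const_operations branch of the pricing union.
type pricing struct {
	AltBnConstOperation *constOpPricing `json:"alt_bn128_const_operations,omitempty"`
}

func main() {
	pre, _ := json.Marshal(pricing{AltBnConstOperation: &constOpPricing{Price: 500}})
	post, _ := json.Marshal(pricing{AltBnConstOperation: &constOpPricing{Price: 500, EIP1108TransitionPrice: 150}})
	fmt.Println(string(pre))  // {"alt_bn128_const_operations":{"price":500}}
	fmt.Println(string(post)) // {"alt_bn128_const_operations":{"price":500,"eip1108_transition_price":150}}
}
```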
type parityChainSpecPricing struct { - Linear *parityChainSpecLinearPricing `json:"linear,omitempty"` - ModExp *parityChainSpecModExpPricing `json:"modexp,omitempty"` - AltBnPairing *parityChainSpecAltBnPairingPricing `json:"alt_bn128_pairing,omitempty"` + Linear *parityChainSpecLinearPricing `json:"linear,omitempty"` + ModExp *parityChainSpecModExpPricing `json:"modexp,omitempty"` + AltBnPairing *parityChainSpecAltBnPairingPricing `json:"alt_bn128_pairing,omitempty"` + AltBnConstOperation *parityChainSpecAltBnConstOperationPricing `json:"alt_bn128_const_operations,omitempty"` + + // Blake2F is the price per round of Blake2 compression + Blake2F *parityChainSpecBlakePricing `json:"blake2_f,omitempty"` } type parityChainSpecLinearPricing struct { @@ -300,9 +332,20 @@ type parityChainSpecModExpPricing struct { Divisor uint64 `json:"divisor"` } +type parityChainSpecAltBnConstOperationPricing struct { + Price uint64 `json:"price"` + EIP1108TransitionPrice uint64 `json:"eip1108_transition_price,omitempty"` // Before Istanbul fork, this field is nil +} + type parityChainSpecAltBnPairingPricing struct { - Base uint64 `json:"base"` - Pair uint64 `json:"pair"` + Base uint64 `json:"base"` + Pair uint64 `json:"pair"` + EIP1108TransitionBase uint64 `json:"eip1108_transition_base,omitempty"` // Before Istanbul fork, this field is nil + EIP1108TransitionPair uint64 `json:"eip1108_transition_pair,omitempty"` // Before Istanbul fork, this field is nil +} + +type parityChainSpecBlakePricing struct { + GasPerRound uint64 `json:"gas_per_round"` } // newParityChainSpec converts a go-ethereum genesis block into a Parity specific @@ -352,7 +395,10 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin if num := genesis.Config.PetersburgBlock; num != nil { spec.setConstantinopleFix(num) } - + // Istanbul + if num := genesis.Config.IstanbulBlock; num != nil { + spec.setIstanbul(num) + } spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize) spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit) spec.Params.GasLimitBoundDivisor = (math2.HexOrDecimal64)(params.GasLimitBoundDivisor) @@ -398,18 +444,34 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin Name: "identity", Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 15, Word: 3}}, }) if genesis.Config.ByzantiumBlock != nil { - blnum := math2.HexOrDecimal64(genesis.Config.ByzantiumBlock.Uint64()) spec.setPrecompile(5, &parityChainSpecBuiltin{ - Name: "modexp", ActivateAt: blnum, Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}}, + Name: "modexp", ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock), Pricing: &parityChainSpecPricing{ModExp: &parityChainSpecModExpPricing{Divisor: 20}}, }) spec.setPrecompile(6, &parityChainSpecBuiltin{ - Name: "alt_bn128_add", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 500}}, + Name: "alt_bn128_add", ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock), Pricing: &parityChainSpecPricing{AltBnConstOperation: &parityChainSpecAltBnConstOperationPricing{Price: 500}}, }) spec.setPrecompile(7, &parityChainSpecBuiltin{ - Name: "alt_bn128_mul", ActivateAt: blnum, Pricing: &parityChainSpecPricing{Linear: &parityChainSpecLinearPricing{Base: 40000}}, + Name: "alt_bn128_mul", ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock), Pricing: &parityChainSpecPricing{AltBnConstOperation: 
&parityChainSpecAltBnConstOperationPricing{Price: 40000}}, }) spec.setPrecompile(8, &parityChainSpecBuiltin{ - Name: "alt_bn128_pairing", ActivateAt: blnum, Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}}, + Name: "alt_bn128_pairing", ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock), Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000}}, + }) + } + if genesis.Config.IstanbulBlock != nil { + if genesis.Config.ByzantiumBlock == nil { + return nil, errors.New("invalid genesis, istanbul fork is enabled while byzantium is not") + } + spec.setPrecompile(6, &parityChainSpecBuiltin{ + Name: "alt_bn128_add", ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock), EIP1108Transition: (*hexutil.Big)(genesis.Config.IstanbulBlock), Pricing: &parityChainSpecPricing{AltBnConstOperation: &parityChainSpecAltBnConstOperationPricing{Price: 500, EIP1108TransitionPrice: 150}}, + }) + spec.setPrecompile(7, &parityChainSpecBuiltin{ + Name: "alt_bn128_mul", ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock), EIP1108Transition: (*hexutil.Big)(genesis.Config.IstanbulBlock), Pricing: &parityChainSpecPricing{AltBnConstOperation: &parityChainSpecAltBnConstOperationPricing{Price: 40000, EIP1108TransitionPrice: 6000}}, + }) + spec.setPrecompile(8, &parityChainSpecBuiltin{ + Name: "alt_bn128_pairing", ActivateAt: (*hexutil.Big)(genesis.Config.ByzantiumBlock), EIP1108Transition: (*hexutil.Big)(genesis.Config.IstanbulBlock), Pricing: &parityChainSpecPricing{AltBnPairing: &parityChainSpecAltBnPairingPricing{Base: 100000, Pair: 80000, EIP1108TransitionBase: 45000, EIP1108TransitionPair: 34000}}, + }) + spec.setPrecompile(9, &parityChainSpecBuiltin{ + Name: "blake2_f", ActivateAt: (*hexutil.Big)(genesis.Config.IstanbulBlock), Pricing: &parityChainSpecPricing{Blake2F: &parityChainSpecBlakePricing{GasPerRound: 1}}, }) } return spec, nil @@ -451,6 +513,15 @@ func (spec *parityChainSpec) setConstantinopleFix(num *big.Int) { spec.Params.EIP1283DisableTransition = hexutil.Uint64(num.Uint64()) } +func (spec *parityChainSpec) setIstanbul(num *big.Int) { + // spec.Params.EIP152Transition = hexutil.Uint64(num.Uint64()) + // spec.Params.EIP1108Transition = hexutil.Uint64(num.Uint64()) + spec.Params.EIP1344Transition = hexutil.Uint64(num.Uint64()) + spec.Params.EIP1884Transition = hexutil.Uint64(num.Uint64()) + spec.Params.EIP2028Transition = hexutil.Uint64(num.Uint64()) + spec.Params.EIP1283ReenableTransition = hexutil.Uint64(num.Uint64()) +} + // pyEthereumGenesisSpec represents the genesis specification format used by the // Python Ethereum implementation. 
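One more note on the converters above before the Python spec: fork activation blocks are now pointer-typed with `omitempty`, so a fork that is not scheduled in the go-ethereum genesis simply disappears from the emitted spec instead of being written out as zero. A small sketch with a cut-down stand-in struct (only `hexutil.Big` is the real type; the tags match the aleth fields above):

```go
package main

import (
	"encoding/json"
	"fmt"
	"math/big"

	"github.com/ethersocial/go-ethersocial/common/hexutil"
)

// forkParams is a stand-in for the aleth params struct with only two of the
// fork-block fields; everything else is omitted for brevity.
type forkParams struct {
	ByzantiumForkBlock *hexutil.Big `json:"byzantiumForkBlock,omitempty"`
	IstanbulForkBlock  *hexutil.Big `json:"istanbulForkBlock,omitempty"`
}

func main() {
	withIstanbul := forkParams{
		ByzantiumForkBlock: (*hexutil.Big)(big.NewInt(30000)),
		IstanbulForkBlock:  (*hexutil.Big)(big.NewInt(50000)),
	}
	withoutIstanbul := forkParams{
		ByzantiumForkBlock: (*hexutil.Big)(big.NewInt(30000)),
	}
	a, _ := json.Marshal(withIstanbul)
	b, _ := json.Marshal(withoutIstanbul)
	fmt.Println(string(a)) // {"byzantiumForkBlock":"0x7530","istanbulForkBlock":"0xc350"}
	fmt.Println(string(b)) // {"byzantiumForkBlock":"0x7530"}
}
```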
type pyEthereumGenesisSpec struct { diff --git a/cmd/puppeth/genesis_test.go b/cmd/puppeth/genesis_test.go index b1826037e..e149463ee 100644 --- a/cmd/puppeth/genesis_test.go +++ b/cmd/puppeth/genesis_test.go @@ -76,7 +76,7 @@ func TestParitySturebyConverter(t *testing.T) { if err := json.Unmarshal(blob, &genesis); err != nil { t.Fatalf("failed parsing genesis: %v", err) } - spec, err := newParityChainSpec("Stureby", &genesis, []string{}) + spec, err := newParityChainSpec("stureby", &genesis, []string{}) if err != nil { t.Fatalf("failed creating chainspec: %v", err) } diff --git a/cmd/puppeth/testdata/stureby_aleth.json b/cmd/puppeth/testdata/stureby_aleth.json index 1ef1d8ae1..d18ba3854 100644 --- a/cmd/puppeth/testdata/stureby_aleth.json +++ b/cmd/puppeth/testdata/stureby_aleth.json @@ -1,112 +1,113 @@ { - "sealEngine":"Ethash", - "params":{ - "accountStartNonce":"0x00", - "maximumExtraDataSize":"0x20", - "homesteadForkBlock":"0x2710", - "daoHardforkBlock":"0x00", - "EIP150ForkBlock":"0x3a98", - "EIP158ForkBlock":"0x59d8", - "byzantiumForkBlock":"0x7530", - "constantinopleForkBlock":"0x9c40", - "minGasLimit":"0x1388", - "maxGasLimit":"0x7fffffffffffffff", - "tieBreakingGas":false, - "gasLimitBoundDivisor":"0x0400", - "minimumDifficulty":"0x20000", - "difficultyBoundDivisor":"0x0800", - "durationLimit":"0x0d", - "blockReward":"0x4563918244F40000", - "networkID":"0x4cb2e", - "chainID":"0x4cb2e", - "allowFutureBlocks":false + "sealEngine": "Ethash", + "params": { + "accountStartNonce": "0x0", + "maximumExtraDataSize": "0x20", + "homesteadForkBlock": "0x2710", + "daoHardforkBlock": "0x0", + "EIP150ForkBlock": "0x3a98", + "EIP158ForkBlock": "0x59d8", + "byzantiumForkBlock": "0x7530", + "constantinopleForkBlock": "0x9c40", + "constantinopleFixForkBlock": "0x9c40", + "istanbulForkBlock": "0xc350", + "minGasLimit": "0x1388", + "maxGasLimit": "0x7fffffffffffffff", + "tieBreakingGas": false, + "gasLimitBoundDivisor": "0x400", + "minimumDifficulty": "0x20000", + "difficultyBoundDivisor": "0x800", + "durationLimit": "0xd", + "blockReward": "0x4563918244f40000", + "networkID": "0x4cb2e", + "chainID": "0x4cb2e", + "allowFutureBlocks": false }, - "genesis":{ - "nonce":"0x0000000000000000", - "difficulty":"0x20000", - "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000", - "author":"0x0000000000000000000000000000000000000000", - "timestamp":"0x59a4e76d", - "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000", - "extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee", - "gasLimit":"0x47b760" + "genesis": { + "nonce": "0x0000000000000000", + "difficulty": "0x20000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "author": "0x0000000000000000000000000000000000000000", + "timestamp": "0x59a4e76d", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData": "0x0000000000000000000000000000000000000000000000000000000b4dc0ffee", + "gasLimit": "0x47b760" }, - "accounts":{ - "0000000000000000000000000000000000000001":{ - "balance":"1", - "precompiled":{ - "name":"ecrecover", - "linear":{ - "base":3000, - "word":0 + "accounts": { + "0000000000000000000000000000000000000001": { + "balance": "0x1", + "precompiled": { + "name": "ecrecover", + "linear": { + "base": 3000, + "word": 0 } } }, - "0000000000000000000000000000000000000002":{ - "balance":"1", - "precompiled":{ - "name":"sha256", - "linear":{ - "base":60, - "word":12 + 
"0000000000000000000000000000000000000002": { + "balance": "0x1", + "precompiled": { + "name": "sha256", + "linear": { + "base": 60, + "word": 12 } } }, - "0000000000000000000000000000000000000003":{ - "balance":"1", - "precompiled":{ - "name":"ripemd160", - "linear":{ - "base":600, - "word":120 + "0000000000000000000000000000000000000003": { + "balance": "0x1", + "precompiled": { + "name": "ripemd160", + "linear": { + "base": 600, + "word": 120 } } }, - "0000000000000000000000000000000000000004":{ - "balance":"1", - "precompiled":{ - "name":"identity", - "linear":{ - "base":15, - "word":3 + "0000000000000000000000000000000000000004": { + "balance": "0x1", + "precompiled": { + "name": "identity", + "linear": { + "base": 15, + "word": 3 } } }, - "0000000000000000000000000000000000000005":{ - "balance":"1", - "precompiled":{ - "name":"modexp", - "startingBlock":"0x7530" + "0000000000000000000000000000000000000005": { + "balance": "0x1", + "precompiled": { + "name": "modexp", + "startingBlock": "0x7530" } }, - "0000000000000000000000000000000000000006":{ - "balance":"1", - "precompiled":{ - "name":"alt_bn128_G1_add", - "startingBlock":"0x7530", - "linear":{ - "base":500, - "word":0 - } + "0000000000000000000000000000000000000006": { + "balance": "0x1", + "precompiled": { + "name": "alt_bn128_G1_add", + "startingBlock": "0x7530" } }, - "0000000000000000000000000000000000000007":{ - "balance":"1", - "precompiled":{ - "name":"alt_bn128_G1_mul", - "startingBlock":"0x7530", - "linear":{ - "base":40000, - "word":0 - } + "0000000000000000000000000000000000000007": { + "balance": "0x1", + "precompiled": { + "name": "alt_bn128_G1_mul", + "startingBlock": "0x7530" + } + }, + "0000000000000000000000000000000000000008": { + "balance": "0x1", + "precompiled": { + "name": "alt_bn128_pairing_product", + "startingBlock": "0x7530" } }, - "0000000000000000000000000000000000000008":{ - "balance":"1", - "precompiled":{ - "name":"alt_bn128_pairing_product", - "startingBlock":"0x7530" + "0000000000000000000000000000000000000009": { + "balance": "0x1", + "precompiled": { + "name": "blake2_compression", + "startingBlock": "0xc350" } } } -} +} \ No newline at end of file diff --git a/cmd/puppeth/testdata/stureby_geth.json b/cmd/puppeth/testdata/stureby_geth.json index c8c3b3c95..79f03469a 100644 --- a/cmd/puppeth/testdata/stureby_geth.json +++ b/cmd/puppeth/testdata/stureby_geth.json @@ -1,6 +1,5 @@ { "config": { - "ethash":{}, "chainId": 314158, "homesteadBlock": 10000, "eip150Block": 15000, @@ -8,11 +7,13 @@ "eip155Block": 23000, "eip158Block": 23000, "byzantiumBlock": 30000, - "constantinopleBlock": 40000 + "constantinopleBlock": 40000, + "petersburgBlock": 40000, + "istanbulBlock": 50000, + "ethash": {} }, "nonce": "0x0", "timestamp": "0x59a4e76d", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "extraData": "0x0000000000000000000000000000000000000000000000000000000b4dc0ffee", "gasLimit": "0x47b760", "difficulty": "0x20000", @@ -20,28 +21,34 @@ "coinbase": "0x0000000000000000000000000000000000000000", "alloc": { "0000000000000000000000000000000000000001": { - "balance": "0x01" + "balance": "0x1" }, "0000000000000000000000000000000000000002": { - "balance": "0x01" + "balance": "0x1" }, "0000000000000000000000000000000000000003": { - "balance": "0x01" + "balance": "0x1" }, "0000000000000000000000000000000000000004": { - "balance": "0x01" + "balance": "0x1" }, "0000000000000000000000000000000000000005": { - "balance": "0x01" + "balance": "0x1" }, 
"0000000000000000000000000000000000000006": { - "balance": "0x01" + "balance": "0x1" }, "0000000000000000000000000000000000000007": { - "balance": "0x01" + "balance": "0x1" }, "0000000000000000000000000000000000000008": { - "balance": "0x01" + "balance": "0x1" + }, + "0000000000000000000000000000000000000009": { + "balance": "0x1" } - } -} + }, + "number": "0x0", + "gasUsed": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file diff --git a/cmd/puppeth/testdata/stureby_parity.json b/cmd/puppeth/testdata/stureby_parity.json index f3fa8386a..fb84b39e2 100644 --- a/cmd/puppeth/testdata/stureby_parity.json +++ b/cmd/puppeth/testdata/stureby_parity.json @@ -1,181 +1,186 @@ { - "name":"Stureby", - "dataDir":"stureby", - "engine":{ - "Ethash":{ - "params":{ - "minimumDifficulty":"0x20000", - "difficultyBoundDivisor":"0x800", - "durationLimit":"0xd", - "blockReward":{ - "0x0":"0x4563918244f40000", - "0x7530":"0x29a2241af62c0000", - "0x9c40":"0x1bc16d674ec80000" + "name": "stureby", + "dataDir": "stureby", + "engine": { + "Ethash": { + "params": { + "minimumDifficulty": "0x20000", + "difficultyBoundDivisor": "0x800", + "durationLimit": "0xd", + "blockReward": { + "0x0": "0x4563918244f40000", + "0x7530": "0x29a2241af62c0000", + "0x9c40": "0x1bc16d674ec80000" }, - "homesteadTransition":"0x2710", - "eip100bTransition":"0x7530", - "difficultyBombDelays":{ - "0x7530":"0x2dc6c0", - "0x9c40":"0x1e8480" - } + "difficultyBombDelays": { + "0x7530": "0x2dc6c0", + "0x9c40": "0x1e8480" + }, + "homesteadTransition": "0x2710", + "eip100bTransition": "0x7530" } } }, - "params":{ - "accountStartNonce":"0x0", - "maximumExtraDataSize":"0x20", - "gasLimitBoundDivisor":"0x400", - "minGasLimit":"0x1388", - "networkID":"0x4cb2e", - "chainID":"0x4cb2e", - "maxCodeSize":"0x6000", - "maxCodeSizeTransition":"0x0", + "params": { + "accountStartNonce": "0x0", + "maximumExtraDataSize": "0x20", + "minGasLimit": "0x1388", + "gasLimitBoundDivisor": "0x400", + "networkID": "0x4cb2e", + "chainID": "0x4cb2e", + "maxCodeSize": "0x6000", + "maxCodeSizeTransition": "0x0", "eip98Transition": "0x7fffffffffffffff", - "eip150Transition":"0x3a98", - "eip160Transition":"0x59d8", - "eip161abcTransition":"0x59d8", - "eip161dTransition":"0x59d8", - "eip155Transition":"0x59d8", - "eip140Transition":"0x7530", - "eip211Transition":"0x7530", - "eip214Transition":"0x7530", - "eip658Transition":"0x7530", - "eip145Transition":"0x9c40", - "eip1014Transition":"0x9c40", - "eip1052Transition":"0x9c40", - "eip1283Transition":"0x9c40" + "eip150Transition": "0x3a98", + "eip160Transition": "0x59d8", + "eip161abcTransition": "0x59d8", + "eip161dTransition": "0x59d8", + "eip155Transition": "0x59d8", + "eip140Transition": "0x7530", + "eip211Transition": "0x7530", + "eip214Transition": "0x7530", + "eip658Transition": "0x7530", + "eip145Transition": "0x9c40", + "eip1014Transition": "0x9c40", + "eip1052Transition": "0x9c40", + "eip1283Transition": "0x9c40", + "eip1283DisableTransition": "0x9c40", + "eip1283ReenableTransition": "0xc350", + "eip1344Transition": "0xc350", + "eip1884Transition": "0xc350", + "eip2028Transition": "0xc350" }, - "genesis":{ - "seal":{ - "ethereum":{ - "nonce":"0x0000000000000000", - "mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000" + "genesis": { + "seal": { + "ethereum": { + "nonce": "0x0000000000000000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000" } }, - "difficulty":"0x20000", - 
"author":"0x0000000000000000000000000000000000000000", - "timestamp":"0x59a4e76d", - "parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000", - "extraData":"0x0000000000000000000000000000000000000000000000000000000b4dc0ffee", - "gasLimit":"0x47b760" + "difficulty": "0x20000", + "author": "0x0000000000000000000000000000000000000000", + "timestamp": "0x59a4e76d", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData": "0x0000000000000000000000000000000000000000000000000000000b4dc0ffee", + "gasLimit": "0x47b760" }, - "nodes":[ - "enode://dfa7aca3f5b635fbfe7d0b20575f25e40d9e27b4bfbb3cf74364a42023ad9f25c1a4383bcc8cced86ee511a7d03415345a4df05be37f1dff040e4c780699f1c0@168.61.153.255:31303", - "enode://ef441b20dd70aeabf0eac35c3b8a2854e5ce04db0e30be9152ea9fd129359dcbb3f803993303ff5781c755dfd7223f3fe43505f583cccb740949407677412ba9@40.74.91.252:31303", - "enode://953b5ea1c8987cf46008232a0160324fd00d41320ecf00e23af86ec8f5396b19eb57ddab37c78141be56f62e9077de4f4dfa0747fa768ed8c8531bbfb1046237@40.70.214.166:31303", - "enode://276e613dd4b277a66591e565711e6c8bb107f0905248a9f8f8228c1a87992e156e5114bb9937c02824a9d9d25f76340442cf86e2028bf5293cae19904fb2b98e@35.178.251.52:30303", - "enode://064c820d41e52ed7d426ac64b60506c2998235bedc7e67cb497c6faf7bb4fc54fe56fc82d0add3180b747c0c4f40a1108a6f84d7d0629ed606d504528e61cc57@3.8.5.3:30303", - "enode://90069fdabcc5e684fa5d59430bebbb12755d9362dfe5006a1485b13d71a78a3812d36e74dd7d88e50b51add01e097ea80f16263aeaa4f0230db6c79e2a97e7ca@217.29.191.142:30303", - "enode://0aac74b7fd28726275e466acb5e03bc88a95927e9951eb66b5efb239b2f798ada0690853b2f2823fe4efa408f0f3d4dd258430bc952a5ff70677b8625b3e3b14@40.115.33.57:40404", - "enode://0b96415a10f835106d83e090a0528eed5e7887e5c802a6d084e9f1993a9d0fc713781e6e4101f6365e9b91259712f291acc0a9e6e667e22023050d602c36fbe2@40.115.33.57:40414" - ], - "accounts":{ - "0000000000000000000000000000000000000001":{ - "balance":"1", - "nonce":"0", - "builtin":{ - "name":"ecrecover", - "pricing":{ - "linear":{ - "base":3000, - "word":0 + "nodes": [], + "accounts": { + "0000000000000000000000000000000000000001": { + "balance": "0x1", + "builtin": { + "name": "ecrecover", + "pricing": { + "linear": { + "base": 3000, + "word": 0 } } } }, - "0000000000000000000000000000000000000002":{ - "balance":"1", - "nonce":"0", - "builtin":{ - "name":"sha256", - "pricing":{ - "linear":{ - "base":60, - "word":12 + "0000000000000000000000000000000000000002": { + "balance": "0x1", + "builtin": { + "name": "sha256", + "pricing": { + "linear": { + "base": 60, + "word": 12 } } } }, - "0000000000000000000000000000000000000003":{ - "balance":"1", - "nonce":"0", - "builtin":{ - "name":"ripemd160", - "pricing":{ - "linear":{ - "base":600, - "word":120 + "0000000000000000000000000000000000000003": { + "balance": "0x1", + "builtin": { + "name": "ripemd160", + "pricing": { + "linear": { + "base": 600, + "word": 120 } } } }, - "0000000000000000000000000000000000000004":{ - "balance":"1", - "nonce":"0", - "builtin":{ - "name":"identity", - "pricing":{ - "linear":{ - "base":15, - "word":3 + "0000000000000000000000000000000000000004": { + "balance": "0x1", + "builtin": { + "name": "identity", + "pricing": { + "linear": { + "base": 15, + "word": 3 } } } }, - "0000000000000000000000000000000000000005":{ - "balance":"1", - "nonce":"0", - "builtin":{ - "name":"modexp", - "activate_at":"0x7530", - "pricing":{ - "modexp":{ - "divisor":20 + "0000000000000000000000000000000000000005": { + "balance": "0x1", + "builtin": { + 
"name": "modexp", + "pricing": { + "modexp": { + "divisor": 20 } - } + }, + "activate_at": "0x7530" } }, - "0000000000000000000000000000000000000006":{ - "balance":"1", - "nonce":"0", - "builtin":{ - "name":"alt_bn128_add", - "activate_at":"0x7530", - "pricing":{ - "linear":{ - "base":500, - "word":0 + "0000000000000000000000000000000000000006": { + "balance": "0x1", + "builtin": { + "name": "alt_bn128_add", + "pricing": { + "alt_bn128_const_operations": { + "price": 500, + "eip1108_transition_price": 150 } - } + }, + "activate_at": "0x7530", + "eip1108_transition": "0xc350" } }, - "0000000000000000000000000000000000000007":{ - "balance":"1", - "nonce":"0", - "builtin":{ - "name":"alt_bn128_mul", - "activate_at":"0x7530", - "pricing":{ - "linear":{ - "base":40000, - "word":0 + "0000000000000000000000000000000000000007": { + "balance": "0x1", + "builtin": { + "name": "alt_bn128_mul", + "pricing": { + "alt_bn128_const_operations": { + "price": 40000, + "eip1108_transition_price": 6000 } - } + }, + "activate_at": "0x7530", + "eip1108_transition": "0xc350" } }, - "0000000000000000000000000000000000000008":{ - "balance":"1", - "nonce":"0", - "builtin":{ - "name":"alt_bn128_pairing", - "activate_at":"0x7530", - "pricing":{ - "alt_bn128_pairing":{ - "base":100000, - "pair":80000 + "0000000000000000000000000000000000000008": { + "balance": "0x1", + "builtin": { + "name": "alt_bn128_pairing", + "pricing": { + "alt_bn128_pairing": { + "base": 100000, + "pair": 80000, + "eip1108_transition_base": 45000, + "eip1108_transition_pair": 34000 } - } + }, + "activate_at": "0x7530", + "eip1108_transition": "0xc350" + } + }, + "0000000000000000000000000000000000000009": { + "balance": "0x1", + "builtin": { + "name": "blake2_f", + "pricing": { + "blake2_f": { + "gas_per_round": 1 + } + }, + "activate_at": "0xc350" } } } -} +} \ No newline at end of file diff --git a/cmd/puppeth/wizard_genesis.go b/cmd/puppeth/wizard_genesis.go index e1b278cd3..2a5034336 100644 --- a/cmd/puppeth/wizard_genesis.go +++ b/cmd/puppeth/wizard_genesis.go @@ -51,6 +51,7 @@ func (w *wizard) makeGenesis() { ByzantiumBlock: big.NewInt(0), ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), }, } // Figure out which consensus engine to choose @@ -230,6 +231,10 @@ func (w *wizard) manageGenesis() { fmt.Printf("Which block should Petersburg come into effect? (default = %v)\n", w.conf.Genesis.Config.PetersburgBlock) w.conf.Genesis.Config.PetersburgBlock = w.readDefaultBigInt(w.conf.Genesis.Config.PetersburgBlock) + fmt.Println() + fmt.Printf("Which block should Istanbul come into effect? 
(default = %v)\n", w.conf.Genesis.Config.IstanbulBlock) + w.conf.Genesis.Config.IstanbulBlock = w.readDefaultBigInt(w.conf.Genesis.Config.IstanbulBlock) + out, _ := json.MarshalIndent(w.conf.Genesis.Config, "", " ") fmt.Printf("Chain configuration updated:\n\n%s\n", out) @@ -268,7 +273,7 @@ func (w *wizard) manageGenesis() { } else { saveGenesis(folder, w.network, "parity", spec) } - // Export the genesis spec used by Harmony (formerly EthereumJ + // Export the genesis spec used by Harmony (formerly EthereumJ) saveGenesis(folder, w.network, "harmony", w.conf.Genesis) case "3": @@ -291,7 +296,7 @@ func (w *wizard) manageGenesis() { func saveGenesis(folder, network, client string, spec interface{}) { path := filepath.Join(folder, fmt.Sprintf("%s-%s.json", network, client)) - out, _ := json.Marshal(spec) + out, _ := json.MarshalIndent(spec, "", " ") if err := ioutil.WriteFile(path, out, 0644); err != nil { log.Error("Failed to save genesis file", "client", client, "err", err) return diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 21730f1ca..cea32d801 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1464,9 +1464,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" { Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name) } - cfg.NoPruning = ctx.GlobalString(GCModeFlag.Name) == "archive" - cfg.NoPrefetch = ctx.GlobalBool(CacheNoPrefetchFlag.Name) - + if ctx.GlobalIsSet(GCModeFlag.Name) { + cfg.NoPruning = ctx.GlobalString(GCModeFlag.Name) == "archive" + } + if ctx.GlobalIsSet(CacheNoPrefetchFlag.Name) { + cfg.NoPrefetch = ctx.GlobalBool(CacheNoPrefetchFlag.Name) + } if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) { cfg.TrieCleanCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheTrieFlag.Name) / 100 } diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index 18ad55468..f5a99902e 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -311,7 +311,7 @@ func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *type if number == 0 { return nil } - // Ensure that the block's timestamp isn't too close to it's parent + // Ensure that the block's timestamp isn't too close to its parent var parent *types.Header if len(parents) > 0 { parent = parents[len(parents)-1] @@ -522,7 +522,7 @@ func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) erro // Set the correct difficulty header.Difficulty = CalcDifficulty(snap, c.signer) - // Ensure the extra data has all it's components + // Ensure the extra data has all its components if len(header.Extra) < extraVanity { header.Extra = append(header.Extra, bytes.Repeat([]byte{0x00}, extraVanity-len(header.Extra))...) } diff --git a/consensus/errors.go b/consensus/errors.go index a005c5f63..ac5242fb5 100644 --- a/consensus/errors.go +++ b/consensus/errors.go @@ -31,7 +31,7 @@ var ( // to the current node. ErrFutureBlock = errors.New("block in the future") - // ErrInvalidNumber is returned if a block's number doesn't equal it's parent's + // ErrInvalidNumber is returned if a block's number doesn't equal its parent's // plus one. 
ErrInvalidNumber = errors.New("invalid block number") ) diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 7ccf7ff4f..59ddb2183 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -89,7 +89,7 @@ func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.He if ethash.config.PowMode == ModeFullFake { return nil } - // Short circuit if the header is known, or it's parent not + // Short circuit if the header is known, or its parent not number := header.Number.Uint64() if chain.GetHeader(header.Hash(), number) != nil { return nil @@ -255,7 +255,7 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent * if header.Time <= parent.Time { return errZeroBlockTime } - // Verify the block's difficulty based in it's timestamp and parent's difficulty + // Verify the block's difficulty based in its timestamp and parent's difficulty expected := ethash.CalcDifficulty(chain, header.Time, parent) if expected.Cmp(header.Difficulty) != 0 { diff --git a/core/asm/compiler.go b/core/asm/compiler.go index 40533fc24..99f896e81 100644 --- a/core/asm/compiler.go +++ b/core/asm/compiler.go @@ -57,6 +57,7 @@ func NewCompiler(debug bool) *Compiler { // second stage to push labels and determine the right // position. func (c *Compiler) Feed(ch <-chan token) { + var prev token for i := range ch { switch i.typ { case number: @@ -73,10 +74,14 @@ func (c *Compiler) Feed(ch <-chan token) { c.labels[i.text] = c.pc c.pc++ case label: - c.pc += 5 + c.pc += 4 + if prev.typ == element && isJump(prev.text) { + c.pc++ + } } c.tokens = append(c.tokens, i) + prev = i } if c.debug { fmt.Fprintln(os.Stderr, "found", len(c.labels), "labels") @@ -181,6 +186,8 @@ func (c *Compiler) compileElement(element token) error { pos := big.NewInt(int64(c.labels[rvalue.text])).Bytes() pos = append(make([]byte, 4-len(pos)), pos...) c.pushBin(pos) + case lineEnd: + c.pos-- default: return compileErr(rvalue, rvalue.text, "number, string or label") } @@ -201,8 +208,8 @@ func (c *Compiler) compileElement(element token) error { case stringValue: value = []byte(rvalue.text[1 : len(rvalue.text)-1]) case label: - value = make([]byte, 4) - copy(value, big.NewInt(int64(c.labels[rvalue.text])).Bytes()) + value = big.NewInt(int64(c.labels[rvalue.text])).Bytes() + value = append(make([]byte, 4-len(value)), value...) default: return compileErr(rvalue, rvalue.text, "number, string or label") } diff --git a/core/asm/compiler_test.go b/core/asm/compiler_test.go new file mode 100644 index 000000000..ce9df436b --- /dev/null +++ b/core/asm/compiler_test.go @@ -0,0 +1,71 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package asm + +import ( + "testing" +) + +func TestCompiler(t *testing.T) { + tests := []struct { + input, output string + }{ + { + input: ` + GAS + label: + PUSH @label +`, + output: "5a5b6300000001", + }, + { + input: ` + PUSH @label + label: +`, + output: "63000000055b", + }, + { + input: ` + PUSH @label + JUMP + label: +`, + output: "6300000006565b", + }, + { + input: ` + JUMP @label + label: +`, + output: "6300000006565b", + }, + } + for _, test := range tests { + ch := Lex([]byte(test.input), false) + c := NewCompiler(false) + c.Feed(ch) + output, err := c.Compile() + if len(err) != 0 { + t.Errorf("compile error: %v\ninput: %s", err, test.input) + continue + } + if output != test.output { + t.Errorf("incorrect output\ninput: %sgot: %s\nwant: %s\n", test.input, output, test.output) + } + } +} diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 6062e1746..555ac73c3 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -1318,7 +1318,7 @@ func TestEIP155Transition(t *testing.T) { funds = big.NewInt(1000000000) deleteAddr = common.Address{1} gspec = &Genesis{ - Config: ¶ms.ChainConfig{ChainID: big.NewInt(1), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)}, + Config: ¶ms.ChainConfig{ChainID: big.NewInt(1), EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}}, } genesis = gspec.MustCommit(db) @@ -1389,7 +1389,7 @@ func TestEIP155Transition(t *testing.T) { } // generate an invalid chain id transaction - config := ¶ms.ChainConfig{ChainID: big.NewInt(2), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)} + config := ¶ms.ChainConfig{ChainID: big.NewInt(2), EIP150Block: big.NewInt(0), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)} blocks, _ = GenerateChain(config, blocks[len(blocks)-1], ethash.NewFaker(), db, 4, func(i int, block *BlockGen) { var ( tx *types.Transaction @@ -1425,6 +1425,7 @@ func TestEIP161AccountRemoval(t *testing.T) { ChainID: big.NewInt(1), HomesteadBlock: new(big.Int), EIP155Block: new(big.Int), + EIP150Block: new(big.Int), EIP158Block: big.NewInt(2), }, Alloc: GenesisAlloc{address: {Balance: funds}}, diff --git a/core/forkid/forkid.go b/core/forkid/forkid.go index 8d4cbf54f..0a452fb00 100644 --- a/core/forkid/forkid.go +++ b/core/forkid/forkid.go @@ -50,6 +50,9 @@ type ID struct { Next uint64 // Block number of the next upcoming fork, or 0 if no forks are known } +// Filter is a fork id filter to validate a remotely advertised ID. +type Filter func(id ID) error + // NewID calculates the Ethereum fork ID from the chain config and head. func NewID(chain *core.BlockChain) ID { return newID( @@ -80,9 +83,9 @@ func newID(config *params.ChainConfig, genesis common.Hash, head uint64) ID { return ID{Hash: checksumToBytes(hash), Next: next} } -// NewFilter creates an filter that returns if a fork ID should be rejected or not +// NewFilter creates a filter that returns if a fork ID should be rejected or not // based on the local chain's status. -func NewFilter(chain *core.BlockChain) func(id ID) error { +func NewFilter(chain *core.BlockChain) Filter { return newFilter( chain.Config(), chain.Genesis().Hash(), @@ -92,10 +95,16 @@ func NewFilter(chain *core.BlockChain) func(id ID) error { ) } +// NewStaticFilter creates a filter at block zero. 
+func NewStaticFilter(config *params.ChainConfig, genesis common.Hash) Filter { + head := func() uint64 { return 0 } + return newFilter(config, genesis, head) +} + // newFilter is the internal version of NewFilter, taking closures as its arguments // instead of a chain. The reason is to allow testing it without having to simulate // an entire blockchain. -func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() uint64) func(id ID) error { +func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() uint64) Filter { // Calculate the all the valid fork hash and fork next combos var ( forks = gatherForks(config) @@ -114,10 +123,13 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui // Create a validator that will filter out incompatible chains return func(id ID) error { // Run the fork checksum validation ruleset: - // 1. If local and remote FORK_CSUM matches, connect. + // 1. If local and remote FORK_CSUM matches, compare local head to FORK_NEXT. // The two nodes are in the same fork state currently. They might know // of differing future forks, but that's not relevant until the fork // triggers (might be postponed, nodes might be updated to match). + // 1a. A remotely announced but remotely not passed block is already passed + // locally, disconnect, since the chains are incompatible. + // 1b. No remotely announced fork; or not yet passed locally, connect. // 2. If the remote FORK_CSUM is a subset of the local past forks and the // remote FORK_NEXT matches with the locally following fork block number, // connect. @@ -139,7 +151,12 @@ func newFilter(config *params.ChainConfig, genesis common.Hash, headfn func() ui // Found the first unpassed fork block, check if our current state matches // the remote checksum (rule #1). if sums[i] == id.Hash { - // Yay, fork checksum matched, ignore any upcoming fork + // Fork checksum matched, check if a remote future fork block already passed + // locally without the local node being aware of it (rule #1a). + if id.Next > 0 && head >= id.Next { + return ErrLocalIncompatibleOrStale + } + // Haven't passed locally a remote-only fork, accept the connection (rule #1b). return nil } // The local and remote nodes are in different forks currently, check if the diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index 2995b7b5a..14fe1d8e2 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -55,8 +55,10 @@ func TestCreation(t *testing.T) { {4369999, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // Last Spurious block {4370000, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // First Byzantium block {7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // Last Byzantium block - {7280000, ID{Hash: checksumToBytes(0x668db0af), Next: 0}}, // First and last Constantinople, first Petersburg block - {7987396, ID{Hash: checksumToBytes(0x668db0af), Next: 0}}, // Today Petersburg block + {7280000, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // First and last Constantinople, first Petersburg block + {9068999, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // Last Petersburg block + {9069000, ID{Hash: checksumToBytes(0x879d6e30), Next: 0}}, // Today Istanbul block + {10000000, ID{Hash: checksumToBytes(0x879d6e30), Next: 0}}, // Future Istanbul block }, }, // Ropsten test cases @@ -151,7 +153,7 @@ func TestValidation(t *testing.T) { // Local is mainnet Petersburg, remote announces Byzantium + knowledge about Petersburg. 
Remote // is simply out of sync, accept. - {7987396, ID{Hash: checksumToBytes(0x668db0af), Next: 7280000}, nil}, + {7987396, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}, nil}, // Local is mainnet Petersburg, remote announces Spurious + knowledge about Byzantium. Remote // is definitely out of sync. It may or may not need the Petersburg update, we don't know yet. @@ -178,6 +180,16 @@ func TestValidation(t *testing.T) { // Local is mainnet Petersburg, remote is Rinkeby Petersburg. {7987396, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale}, + + // Local is mainnet Istanbul, far in the future. Remote announces Gopherium (non existing fork) + // at some future block 88888888, for itself, but past block for local. Local is incompatible. + // + // This case detects non-upgraded nodes with majority hash power (typical Ropsten mess). + {88888888, ID{Hash: checksumToBytes(0x879d6e30), Next: 88888888}, ErrLocalIncompatibleOrStale}, + + // Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing + // fork) at block 7279999, before Petersburg. Local is incompatible. + {7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7279999}, ErrLocalIncompatibleOrStale}, } for i, tt := range tests { filter := newFilter(params.MainnetChainConfig, params.MainnetGenesisHash, func() uint64 { return tt.head }) diff --git a/core/genesis.go b/core/genesis.go index 53d8ce13d..019769795 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -207,6 +207,9 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override if overrideIstanbul != nil { newcfg.IstanbulBlock = overrideIstanbul } + if err := newcfg.CheckConfigForkOrder(); err != nil { + return newcfg, common.Hash{}, err + } storedcfg := rawdb.ReadChainConfig(db, stored) if storedcfg == nil { log.Warn("Found genesis block without chain config") @@ -297,6 +300,13 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) { if block.Number().Sign() != 0 { return nil, fmt.Errorf("can't commit genesis block with number > 0") } + config := g.Config + if config == nil { + config = params.AllEthashProtocolChanges + } + if err := config.CheckConfigForkOrder(); err != nil { + return nil, err + } rawdb.WriteTd(db, block.Hash(), block.NumberU64(), g.Difficulty) rawdb.WriteBlock(db, block) rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil) @@ -304,11 +314,6 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) { rawdb.WriteHeadBlockHash(db, block.Hash()) rawdb.WriteHeadFastBlockHash(db, block.Hash()) rawdb.WriteHeadHeaderHash(db, block.Hash()) - - config := g.Config - if config == nil { - config = params.AllEthashProtocolChanges - } rawdb.WriteChainConfig(db, block.Hash(), config) return block, nil } diff --git a/core/state/sync_test.go b/core/state/sync_test.go index 17b3066b0..b689822e0 100644 --- a/core/state/sync_test.go +++ b/core/state/sync_test.go @@ -136,7 +136,7 @@ func TestEmptyStateSync(t *testing.T) { func TestIterativeStateSyncIndividual(t *testing.T) { testIterativeStateSync(t, 1) } func TestIterativeStateSyncBatched(t *testing.T) { testIterativeStateSync(t, 100) } -func testIterativeStateSync(t *testing.T, batch int) { +func testIterativeStateSync(t *testing.T, count int) { // Create a random state to copy srcDb, srcRoot, srcAccounts := makeTestState() @@ -144,7 +144,7 @@ func testIterativeStateSync(t *testing.T, batch int) { dstDb := rawdb.NewMemoryDatabase() sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb)) - queue 
:= append([]common.Hash{}, sched.Missing(batch)...) + queue := append([]common.Hash{}, sched.Missing(count)...) for len(queue) > 0 { results := make([]trie.SyncResult, len(queue)) for i, hash := range queue { @@ -157,10 +157,12 @@ func testIterativeStateSync(t *testing.T, batch int) { if _, index, err := sched.Process(results); err != nil { t.Fatalf("failed to process result #%d: %v", index, err) } - if index, err := sched.Commit(dstDb); err != nil { - t.Fatalf("failed to commit data #%d: %v", index, err) + batch := dstDb.NewBatch() + if err := sched.Commit(batch); err != nil { + t.Fatalf("failed to commit data: %v", err) } - queue = append(queue[:0], sched.Missing(batch)...) + batch.Write() + queue = append(queue[:0], sched.Missing(count)...) } // Cross check that the two states are in sync checkStateAccounts(t, dstDb, srcRoot, srcAccounts) @@ -190,9 +192,11 @@ func TestIterativeDelayedStateSync(t *testing.T) { if _, index, err := sched.Process(results); err != nil { t.Fatalf("failed to process result #%d: %v", index, err) } - if index, err := sched.Commit(dstDb); err != nil { - t.Fatalf("failed to commit data #%d: %v", index, err) + batch := dstDb.NewBatch() + if err := sched.Commit(batch); err != nil { + t.Fatalf("failed to commit data: %v", err) } + batch.Write() queue = append(queue[len(results):], sched.Missing(0)...) } // Cross check that the two states are in sync @@ -205,7 +209,7 @@ func TestIterativeDelayedStateSync(t *testing.T) { func TestIterativeRandomStateSyncIndividual(t *testing.T) { testIterativeRandomStateSync(t, 1) } func TestIterativeRandomStateSyncBatched(t *testing.T) { testIterativeRandomStateSync(t, 100) } -func testIterativeRandomStateSync(t *testing.T, batch int) { +func testIterativeRandomStateSync(t *testing.T, count int) { // Create a random state to copy srcDb, srcRoot, srcAccounts := makeTestState() @@ -214,7 +218,7 @@ func testIterativeRandomStateSync(t *testing.T, batch int) { sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb)) queue := make(map[common.Hash]struct{}) - for _, hash := range sched.Missing(batch) { + for _, hash := range sched.Missing(count) { queue[hash] = struct{}{} } for len(queue) > 0 { @@ -231,11 +235,13 @@ func testIterativeRandomStateSync(t *testing.T, batch int) { if _, index, err := sched.Process(results); err != nil { t.Fatalf("failed to process result #%d: %v", index, err) } - if index, err := sched.Commit(dstDb); err != nil { - t.Fatalf("failed to commit data #%d: %v", index, err) + batch := dstDb.NewBatch() + if err := sched.Commit(batch); err != nil { + t.Fatalf("failed to commit data: %v", err) } + batch.Write() queue = make(map[common.Hash]struct{}) - for _, hash := range sched.Missing(batch) { + for _, hash := range sched.Missing(count) { queue[hash] = struct{}{} } } @@ -277,9 +283,11 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) { if _, index, err := sched.Process(results); err != nil { t.Fatalf("failed to process result #%d: %v", index, err) } - if index, err := sched.Commit(dstDb); err != nil { - t.Fatalf("failed to commit data #%d: %v", index, err) + batch := dstDb.NewBatch() + if err := sched.Commit(batch); err != nil { + t.Fatalf("failed to commit data: %v", err) } + batch.Write() for _, hash := range sched.Missing(0) { queue[hash] = struct{}{} } @@ -316,9 +324,11 @@ func TestIncompleteStateSync(t *testing.T) { if _, index, err := sched.Process(results); err != nil { t.Fatalf("failed to process result #%d: %v", index, err) } - if index, err := sched.Commit(dstDb); err != nil { - 
t.Fatalf("failed to commit data #%d: %v", index, err) + batch := dstDb.NewBatch() + if err := sched.Commit(batch); err != nil { + t.Fatalf("failed to commit data: %v", err) } + batch.Write() for _, result := range results { added = append(added, result.Hash) } diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 2f61fa204..d8c9a37e9 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -106,8 +106,13 @@ func (c *ecrecover) Run(input []byte) ([]byte, error) { if !allZero(input[32:63]) || !crypto.ValidateSignatureValues(v, r, s, false) { return nil, nil } + // We must make sure not to modify the 'input', so placing the 'v' along with + // the signature needs to be done on a new allocation + sig := make([]byte, 65) + copy(sig, input[64:128]) + sig[64] = v // v needs to be at the end for libsecp256k1 - pubKey, err := crypto.Ecrecover(input[:32], append(input[64:128], v)) + pubKey, err := crypto.Ecrecover(input[:32], sig) // make sure the public key is a valid one if err != nil { return nil, nil diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go index 3703792da..11a92e1dc 100644 --- a/core/vm/contracts_test.go +++ b/core/vm/contracts_test.go @@ -17,6 +17,7 @@ package vm import ( + "bytes" "fmt" "math/big" "reflect" @@ -409,6 +410,11 @@ func testPrecompiled(addr string, test precompiledTest, t *testing.T) { } else if common.Bytes2Hex(res) != test.expected { t.Errorf("Expected %v, got %v", test.expected, common.Bytes2Hex(res)) } + // Verify that the precompile did not touch the input buffer + exp := common.Hex2Bytes(test.input) + if !bytes.Equal(in, exp) { + t.Errorf("Precompiled %v modified input data", addr) + } }) } @@ -423,6 +429,11 @@ func testPrecompiledFailure(addr string, test precompiledFailureTest, t *testing if !reflect.DeepEqual(err, test.expectedError) { t.Errorf("Expected error [%v], got [%v]", test.expectedError, err) } + // Verify that the precompile did not touch the input buffer + exp := common.Hex2Bytes(test.input) + if !bytes.Equal(in, exp) { + t.Errorf("Precompiled %v modified input data", addr) + } }) } @@ -574,3 +585,55 @@ func TestPrecompileBlake2FMalformedInput(t *testing.T) { testPrecompiledFailure("09", test, t) } } + +// EcRecover test vectors +var ecRecoverTests = []precompiledTest{ + { + input: "a8b53bdf3306a35a7103ab5504a0c9b492295564b6202b1942a84ef300107281" + + "000000000000000000000000000000000000000000000000000000000000001b" + + "3078356531653033663533636531386237373263636230303933666637316633" + + "6635336635633735623734646362333161383561613862383839326234653862" + + "1122334455667788991011121314151617181920212223242526272829303132", + expected: "", + name: "CallEcrecoverUnrecoverableKey", + }, + { + input: "18c547e4f7b0f325ad1e56f57e26c745b09a3e503d86e00e5255ff7f715d3d1c" + + "000000000000000000000000000000000000000000000000000000000000001c" + + "73b1693892219d736caba55bdb67216e485557ea6b6af75f37096c9aa6a5a75f" + + "eeb940b1d03b21e36b0e47e79769f095fe2ab855bd91e3a38756b7d75a9c4549", + expected: "000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b", + name: "ValidKey", + }, + { + input: "18c547e4f7b0f325ad1e56f57e26c745b09a3e503d86e00e5255ff7f715d3d1c" + + "100000000000000000000000000000000000000000000000000000000000001c" + + "73b1693892219d736caba55bdb67216e485557ea6b6af75f37096c9aa6a5a75f" + + "eeb940b1d03b21e36b0e47e79769f095fe2ab855bd91e3a38756b7d75a9c4549", + expected: "", + name: "InvalidHighV-bits-1", + }, + { + input: "18c547e4f7b0f325ad1e56f57e26c745b09a3e503d86e00e5255ff7f715d3d1c" + + 
"000000000000000000000000000000000000001000000000000000000000001c" + + "73b1693892219d736caba55bdb67216e485557ea6b6af75f37096c9aa6a5a75f" + + "eeb940b1d03b21e36b0e47e79769f095fe2ab855bd91e3a38756b7d75a9c4549", + expected: "", + name: "InvalidHighV-bits-2", + }, + { + input: "18c547e4f7b0f325ad1e56f57e26c745b09a3e503d86e00e5255ff7f715d3d1c" + + "000000000000000000000000000000000000001000000000000000000000011c" + + "73b1693892219d736caba55bdb67216e485557ea6b6af75f37096c9aa6a5a75f" + + "eeb940b1d03b21e36b0e47e79769f095fe2ab855bd91e3a38756b7d75a9c4549", + expected: "", + name: "InvalidHighV-bits-3", + }, +} + +func TestPrecompiledEcrecover(t *testing.T) { + for _, test := range ecRecoverTests { + testPrecompiled("01", test, t) + } + +} diff --git a/core/vm/instructions.go b/core/vm/instructions.go index b1c824a96..104a5ea7e 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -384,7 +384,7 @@ func opSAR(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory * func opSha3(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { offset, size := stack.pop(), stack.pop() - data := memory.Get(offset.Int64(), size.Int64()) + data := memory.GetPtr(offset.Int64(), size.Int64()) if interpreter.hasher == nil { interpreter.hasher = sha3.NewLegacyKeccak256().(keccakState) @@ -602,11 +602,9 @@ func opPop(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory * } func opMload(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) { - offset := stack.pop() - val := interpreter.intPool.get().SetBytes(memory.Get(offset.Int64(), 32)) - stack.push(val) - - interpreter.intPool.put(offset) + v := stack.peek() + offset := v.Int64() + v.SetBytes(memory.GetPtr(offset, 32)) return nil, nil } @@ -691,7 +689,7 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memor var ( value = stack.pop() offset, size = stack.pop(), stack.pop() - input = memory.Get(offset.Int64(), size.Int64()) + input = memory.GetCopy(offset.Int64(), size.Int64()) gas = contract.Gas ) if interpreter.evm.chainRules.IsEIP150 { @@ -725,7 +723,7 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memo endowment = stack.pop() offset, size = stack.pop(), stack.pop() salt = stack.pop() - input = memory.Get(offset.Int64(), size.Int64()) + input = memory.GetCopy(offset.Int64(), size.Int64()) gas = contract.Gas ) @@ -757,7 +755,7 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, contract *Contract, memory toAddr := common.BigToAddress(addr) value = math.U256(value) // Get the arguments from the memory. - args := memory.Get(inOffset.Int64(), inSize.Int64()) + args := memory.GetPtr(inOffset.Int64(), inSize.Int64()) if value.Sign() != 0 { gas += params.CallStipend @@ -786,7 +784,7 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, contract *Contract, mem toAddr := common.BigToAddress(addr) value = math.U256(value) // Get arguments from the memory. - args := memory.Get(inOffset.Int64(), inSize.Int64()) + args := memory.GetPtr(inOffset.Int64(), inSize.Int64()) if value.Sign() != 0 { gas += params.CallStipend @@ -814,7 +812,7 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, contract *Contract, addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop() toAddr := common.BigToAddress(addr) // Get arguments from the memory. 
- args := memory.Get(inOffset.Int64(), inSize.Int64()) + args := memory.GetPtr(inOffset.Int64(), inSize.Int64()) ret, returnGas, err := interpreter.evm.DelegateCall(contract, toAddr, args, gas) if err != nil { @@ -839,7 +837,7 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, contract *Contract, m addr, inOffset, inSize, retOffset, retSize := stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop() toAddr := common.BigToAddress(addr) // Get arguments from the memory. - args := memory.Get(inOffset.Int64(), inSize.Int64()) + args := memory.GetPtr(inOffset.Int64(), inSize.Int64()) ret, returnGas, err := interpreter.evm.StaticCall(contract, toAddr, args, gas) if err != nil { @@ -895,7 +893,7 @@ func makeLog(size int) executionFunc { topics[i] = common.BigToHash(stack.pop()) } - d := memory.Get(mStart.Int64(), mSize.Int64()) + d := memory.GetCopy(mStart.Int64(), mSize.Int64()) interpreter.evm.StateDB.AddLog(&types.Log{ Address: contract.Address(), Topics: topics, diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go index 73ee20691..9c2a978ec 100644 --- a/core/vm/instructions_test.go +++ b/core/vm/instructions_test.go @@ -509,12 +509,12 @@ func TestOpMstore(t *testing.T) { v := "abcdef00000000000000abba000000000deaf000000c0de00100000000133700" stack.pushN(new(big.Int).SetBytes(common.Hex2Bytes(v)), big.NewInt(0)) opMstore(&pc, evmInterpreter, nil, mem, stack) - if got := common.Bytes2Hex(mem.Get(0, 32)); got != v { + if got := common.Bytes2Hex(mem.GetCopy(0, 32)); got != v { t.Fatalf("Mstore fail, got %v, expected %v", got, v) } stack.pushN(big.NewInt(0x1), big.NewInt(0)) opMstore(&pc, evmInterpreter, nil, mem, stack) - if common.Bytes2Hex(mem.Get(0, 32)) != "0000000000000000000000000000000000000000000000000000000000000001" { + if common.Bytes2Hex(mem.GetCopy(0, 32)) != "0000000000000000000000000000000000000000000000000000000000000001" { t.Fatalf("Mstore failed to overwrite previous value") } poolOfIntPools.put(evmInterpreter.intPool) diff --git a/core/vm/memory.go b/core/vm/memory.go index e7a9042f9..04b15bb59 100644 --- a/core/vm/memory.go +++ b/core/vm/memory.go @@ -70,7 +70,7 @@ func (m *Memory) Resize(size uint64) { } // Get returns offset + size as a new slice -func (m *Memory) Get(offset, size int64) (cpy []byte) { +func (m *Memory) GetCopy(offset, size int64) (cpy []byte) { if size == 0 { return nil } diff --git a/dashboard/README.md b/dashboard/README.md index 641c5f44b..67b65bda3 100644 --- a/dashboard/README.md +++ b/dashboard/README.md @@ -48,8 +48,8 @@ For more IDE support install the `linter-eslint` package too, which finds the `. [ESLint]: https://eslint.org/ [Airbnb]: https://github.com/airbnb/javascript/tree/master/react [Webpack]: https://webpack.github.io/ -[WA]: http://webpack.github.io/analyse/ -[WV]: http://chrisbateman.github.io/webpack-visualizer/ +[WA]: https://webpack.github.io/analyse/ +[WV]: https://chrisbateman.github.io/webpack-visualizer/ [Node.js]: https://nodejs.org/en/ [Flow]: https://flow.org/ [Atom]: https://atom.io/ diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index f630b42ce..9e17023f5 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -1574,13 +1574,14 @@ func (d *Downloader) importBlockResults(results []*fetchResult) error { func (d *Downloader) processFastSyncContent(latest *types.Header) error { // Start syncing state of the reported head block. This should get us most of // the state of the pivot block. 
- stateSync := d.syncState(latest.Root) - defer stateSync.Cancel() - go func() { - if err := stateSync.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled { + sync := d.syncState(latest.Root) + defer sync.Cancel() + closeOnErr := func(s *stateSync) { + if err := s.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled { d.queue.Close() // wake up Results } - }() + } + go closeOnErr(sync) // Figure out the ideal pivot block. Note, that this goalpost may move if the // sync takes long enough for the chain head to move significantly. pivot := uint64(0) @@ -1600,12 +1601,12 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error { if len(results) == 0 { // If pivot sync is done, stop if oldPivot == nil { - return stateSync.Cancel() + return sync.Cancel() } // If sync failed, stop select { case <-d.cancelCh: - stateSync.Cancel() + sync.Cancel() return errCanceled default: } @@ -1625,28 +1626,24 @@ func (d *Downloader) processFastSyncContent(latest *types.Header) error { } } P, beforeP, afterP := splitAroundPivot(pivot, results) - if err := d.commitFastSyncData(beforeP, stateSync); err != nil { + if err := d.commitFastSyncData(beforeP, sync); err != nil { return err } if P != nil { // If new pivot block found, cancel old state retrieval and restart if oldPivot != P { - stateSync.Cancel() + sync.Cancel() - stateSync = d.syncState(P.Header.Root) - defer stateSync.Cancel() - go func() { - if err := stateSync.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled { - d.queue.Close() // wake up Results - } - }() + sync = d.syncState(P.Header.Root) + defer sync.Cancel() + go closeOnErr(sync) oldPivot = P } // Wait for completion, occasionally checking for pivot staleness select { - case <-stateSync.done: - if stateSync.err != nil { - return stateSync.err + case <-sync.done: + if sync.err != nil { + return sync.err } if err := d.commitPivotBlock(P); err != nil { return err diff --git a/eth/downloader/statesync.go b/eth/downloader/statesync.go index 4bd45d261..52362b18a 100644 --- a/eth/downloader/statesync.go +++ b/eth/downloader/statesync.go @@ -347,7 +347,7 @@ func (s *stateSync) commit(force bool) error { } start := time.Now() b := s.d.stateDB.NewBatch() - if written, err := s.sched.Commit(b); written == 0 || err != nil { + if err := s.sched.Commit(b); err != nil { return err } if err := b.Write(); err != nil { diff --git a/eth/handler.go b/eth/handler.go index a36ee26f8..61f63329a 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -29,6 +29,7 @@ import ( "github.com/ethersocial/go-ethersocial/common" "github.com/ethersocial/go-ethersocial/consensus" "github.com/ethersocial/go-ethersocial/core" + "github.com/ethersocial/go-ethersocial/core/forkid" "github.com/ethersocial/go-ethersocial/core/types" "github.com/ethersocial/go-ethersocial/eth/downloader" "github.com/ethersocial/go-ethersocial/eth/fetcher" @@ -63,7 +64,8 @@ func errResp(code errCode, format string, v ...interface{}) error { } type ProtocolManager struct { - networkID uint64 + networkID uint64 + forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node fastSync uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks) acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing) @@ -103,6 +105,7 @@ func NewProtocolManager(config *params.ChainConfig, checkpoint *params.TrustedCh // Create the protocol manager with the base fields manager := &ProtocolManager{ networkID: networkID, + 
forkFilter: forkid.NewFilter(blockchain), eventMux: mux, txpool: txpool, blockchain: blockchain, @@ -304,7 +307,7 @@ func (pm *ProtocolManager) handle(p *peer) error { number = head.Number.Uint64() td = pm.blockchain.GetTd(hash, number) ) - if err := p.Handshake(pm.networkID, td, hash, genesis.Hash()); err != nil { + if err := p.Handshake(pm.networkID, td, hash, genesis.Hash(), forkid.NewID(pm.blockchain), pm.forkFilter); err != nil { p.Log().Debug("Ethereum handshake failed", "err", err) return err } diff --git a/eth/handler_test.go b/eth/handler_test.go index 50535d2cb..2aa8056bc 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -39,8 +39,8 @@ import ( ) // Tests that block headers can be retrieved from a remote chain based on user queries. -func TestGetBlockHeaders62(t *testing.T) { testGetBlockHeaders(t, 62) } func TestGetBlockHeaders63(t *testing.T) { testGetBlockHeaders(t, 63) } +func TestGetBlockHeaders64(t *testing.T) { testGetBlockHeaders(t, 64) } func testGetBlockHeaders(t *testing.T, protocol int) { pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, downloader.MaxHashFetch+15, nil, nil) @@ -198,8 +198,8 @@ func testGetBlockHeaders(t *testing.T, protocol int) { } // Tests that block contents can be retrieved from a remote chain based on their hashes. -func TestGetBlockBodies62(t *testing.T) { testGetBlockBodies(t, 62) } func TestGetBlockBodies63(t *testing.T) { testGetBlockBodies(t, 63) } +func TestGetBlockBodies64(t *testing.T) { testGetBlockBodies(t, 64) } func testGetBlockBodies(t *testing.T, protocol int) { pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, downloader.MaxBlockFetch+15, nil, nil) @@ -271,6 +271,7 @@ func testGetBlockBodies(t *testing.T, protocol int) { // Tests that the node state database can be retrieved based on hashes. func TestGetNodeData63(t *testing.T) { testGetNodeData(t, 63) } +func TestGetNodeData64(t *testing.T) { testGetNodeData(t, 64) } func testGetNodeData(t *testing.T, protocol int) { // Define three accounts to simulate transactions with @@ -367,6 +368,7 @@ func testGetNodeData(t *testing.T, protocol int) { // Tests that the transaction receipts can be retrieved based on hashes. func TestGetReceipt63(t *testing.T) { testGetReceipt(t, 63) } +func TestGetReceipt64(t *testing.T) { testGetReceipt(t, 64) } func testGetReceipt(t *testing.T, protocol int) { // Define three accounts to simulate transactions with diff --git a/eth/helper_test.go b/eth/helper_test.go index a7d21db4b..c558af6fc 100644 --- a/eth/helper_test.go +++ b/eth/helper_test.go @@ -22,6 +22,7 @@ package eth import ( "crypto/ecdsa" "crypto/rand" + "fmt" "math/big" "sort" "sync" @@ -30,6 +31,7 @@ import ( "github.com/ethersocial/go-ethersocial/common" "github.com/ethersocial/go-ethersocial/consensus/ethash" "github.com/ethersocial/go-ethersocial/core" + "github.com/ethersocial/go-ethersocial/core/forkid" "github.com/ethersocial/go-ethersocial/core/rawdb" "github.com/ethersocial/go-ethersocial/core/types" "github.com/ethersocial/go-ethersocial/core/vm" @@ -171,20 +173,35 @@ func newTestPeer(name string, version int, pm *ProtocolManager, shake bool) (*te head = pm.blockchain.CurrentHeader() td = pm.blockchain.GetTd(head.Hash(), head.Number.Uint64()) ) - tp.handshake(nil, td, head.Hash(), genesis.Hash()) + tp.handshake(nil, td, head.Hash(), genesis.Hash(), forkid.NewID(pm.blockchain), forkid.NewFilter(pm.blockchain)) } return tp, errc } // handshake simulates a trivial handshake that expects the same state from the // remote side as we are simulating locally. 
-func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, genesis common.Hash) { - msg := &statusData{ - ProtocolVersion: uint32(p.version), - NetworkId: DefaultConfig.NetworkId, - TD: td, - CurrentBlock: head, - GenesisBlock: genesis, +func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter) { + var msg interface{} + switch { + case p.version == eth63: + msg = &statusData63{ + ProtocolVersion: uint32(p.version), + NetworkId: DefaultConfig.NetworkId, + TD: td, + CurrentBlock: head, + GenesisBlock: genesis, + } + case p.version == eth64: + msg = &statusData{ + ProtocolVersion: uint32(p.version), + NetworkID: DefaultConfig.NetworkId, + TD: td, + Head: head, + Genesis: genesis, + ForkID: forkID, + } + default: + panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version)) } if err := p2p.ExpectMsg(p.app, StatusMsg, msg); err != nil { t.Fatalf("status recv: %v", err) diff --git a/eth/peer.go b/eth/peer.go index 43bccaae5..c0054487d 100644 --- a/eth/peer.go +++ b/eth/peer.go @@ -25,6 +25,7 @@ import ( mapset "github.com/deckarep/golang-set" "github.com/ethersocial/go-ethersocial/common" + "github.com/ethersocial/go-ethersocial/core/forkid" "github.com/ethersocial/go-ethersocial/core/types" "github.com/ethersocial/go-ethersocial/p2p" "github.com/ethersocial/go-ethersocial/rlp" @@ -353,22 +354,46 @@ func (p *peer) RequestReceipts(hashes []common.Hash) error { // Handshake executes the eth protocol handshake, negotiating version number, // network IDs, difficulties, head and genesis blocks. -func (p *peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis common.Hash) error { +func (p *peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter) error { // Send out own handshake in a new thread errc := make(chan error, 2) - var status statusData // safe to read after two values have been received from errc + var ( + status63 statusData63 // safe to read after two values have been received from errc + status statusData // safe to read after two values have been received from errc + ) go func() { - errc <- p2p.Send(p.rw, StatusMsg, &statusData{ - ProtocolVersion: uint32(p.version), - NetworkId: network, - TD: td, - CurrentBlock: head, - GenesisBlock: genesis, - }) + switch { + case p.version == eth63: + errc <- p2p.Send(p.rw, StatusMsg, &statusData63{ + ProtocolVersion: uint32(p.version), + NetworkId: network, + TD: td, + CurrentBlock: head, + GenesisBlock: genesis, + }) + case p.version == eth64: + errc <- p2p.Send(p.rw, StatusMsg, &statusData{ + ProtocolVersion: uint32(p.version), + NetworkID: network, + TD: td, + Head: head, + Genesis: genesis, + ForkID: forkID, + }) + default: + panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version)) + } }() go func() { - errc <- p.readStatus(network, &status, genesis) + switch { + case p.version == eth63: + errc <- p.readStatusLegacy(network, &status63, genesis) + case p.version == eth64: + errc <- p.readStatus(network, &status, genesis, forkFilter) + default: + panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version)) + } }() timeout := time.NewTimer(handshakeTimeout) defer timeout.Stop() @@ -382,11 +407,18 @@ func (p *peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis return p2p.DiscReadTimeout } } - p.td, p.head = status.TD, status.CurrentBlock + switch { + case p.version == eth63: + p.td, p.head = status63.TD, 
status63.CurrentBlock + case p.version == eth64: + p.td, p.head = status.TD, status.Head + default: + panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version)) + } return nil } -func (p *peer) readStatus(network uint64, status *statusData, genesis common.Hash) (err error) { +func (p *peer) readStatusLegacy(network uint64, status *statusData63, genesis common.Hash) error { msg, err := p.rw.ReadMsg() if err != nil { return err @@ -402,10 +434,10 @@ func (p *peer) readStatus(network uint64, status *statusData, genesis common.Has return errResp(ErrDecode, "msg %v: %v", msg, err) } if status.GenesisBlock != genesis { - return errResp(ErrGenesisBlockMismatch, "%x (!= %x)", status.GenesisBlock[:8], genesis[:8]) + return errResp(ErrGenesisMismatch, "%x (!= %x)", status.GenesisBlock[:8], genesis[:8]) } if status.NetworkId != network { - return errResp(ErrNetworkIdMismatch, "%d (!= %d)", status.NetworkId, network) + return errResp(ErrNetworkIDMismatch, "%d (!= %d)", status.NetworkId, network) } if int(status.ProtocolVersion) != p.version { return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.version) @@ -413,6 +445,36 @@ func (p *peer) readStatus(network uint64, status *statusData, genesis common.Has return nil } +func (p *peer) readStatus(network uint64, status *statusData, genesis common.Hash, forkFilter forkid.Filter) error { + msg, err := p.rw.ReadMsg() + if err != nil { + return err + } + if msg.Code != StatusMsg { + return errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg) + } + if msg.Size > protocolMaxMsgSize { + return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, protocolMaxMsgSize) + } + // Decode the handshake and make sure everything matches + if err := msg.Decode(&status); err != nil { + return errResp(ErrDecode, "msg %v: %v", msg, err) + } + if status.NetworkID != network { + return errResp(ErrNetworkIDMismatch, "%d (!= %d)", status.NetworkID, network) + } + if int(status.ProtocolVersion) != p.version { + return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.version) + } + if status.Genesis != genesis { + return errResp(ErrGenesisMismatch, "%x (!= %x)", status.Genesis, genesis) + } + if err := forkFilter(status.ForkID); err != nil { + return errResp(ErrForkIDRejected, "%v", err) + } + return nil +} + // String implements fmt.Stringer. func (p *peer) String() string { return fmt.Sprintf("Peer %s [%s]", p.id, diff --git a/eth/protocol.go b/eth/protocol.go index 6d5bf62d2..1f44e6d55 100644 --- a/eth/protocol.go +++ b/eth/protocol.go @@ -23,6 +23,7 @@ import ( "github.com/ethersocial/go-ethersocial/common" "github.com/ethersocial/go-ethersocial/core" + "github.com/ethersocial/go-ethersocial/core/forkid" "github.com/ethersocial/go-ethersocial/core/types" "github.com/ethersocial/go-ethersocial/event" "github.com/ethersocial/go-ethersocial/rlp" @@ -30,24 +31,23 @@ import ( // Constants to match up protocol versions and messages const ( - eth62 = 62 eth63 = 63 + eth64 = 64 ) // protocolName is the official short name of the protocol used during capability negotiation. const protocolName = "eth" // ProtocolVersions are the supported versions of the eth protocol (first is primary). -var ProtocolVersions = []uint{eth63} +var ProtocolVersions = []uint{eth64, eth63} // protocolLengths are the number of implemented message corresponding to different protocol versions. 
-var protocolLengths = map[uint]uint64{eth63: 17, eth62: 8} +var protocolLengths = map[uint]uint64{eth64: 17, eth63: 17} const protocolMaxMsgSize = 10 * 1024 * 1024 // Maximum cap on the size of a protocol message // eth protocol message codes const ( - // Protocol messages belonging to eth/62 StatusMsg = 0x00 NewBlockHashesMsg = 0x01 TxMsg = 0x02 @@ -56,12 +56,10 @@ const ( GetBlockBodiesMsg = 0x05 BlockBodiesMsg = 0x06 NewBlockMsg = 0x07 - - // Protocol messages belonging to eth/63 - GetNodeDataMsg = 0x0d - NodeDataMsg = 0x0e - GetReceiptsMsg = 0x0f - ReceiptsMsg = 0x10 + GetNodeDataMsg = 0x0d + NodeDataMsg = 0x0e + GetReceiptsMsg = 0x0f + ReceiptsMsg = 0x10 ) type errCode int @@ -71,11 +69,11 @@ const ( ErrDecode ErrInvalidMsgCode ErrProtocolVersionMismatch - ErrNetworkIdMismatch - ErrGenesisBlockMismatch + ErrNetworkIDMismatch + ErrGenesisMismatch + ErrForkIDRejected ErrNoStatusMsg ErrExtraStatusMsg - ErrSuspendedPeer ) func (e errCode) String() string { @@ -88,11 +86,11 @@ var errorToString = map[int]string{ ErrDecode: "Invalid message", ErrInvalidMsgCode: "Invalid message code", ErrProtocolVersionMismatch: "Protocol version mismatch", - ErrNetworkIdMismatch: "NetworkId mismatch", - ErrGenesisBlockMismatch: "Genesis block mismatch", + ErrNetworkIDMismatch: "Network ID mismatch", + ErrGenesisMismatch: "Genesis mismatch", + ErrForkIDRejected: "Fork ID rejected", ErrNoStatusMsg: "No status message", ErrExtraStatusMsg: "Extra status message", - ErrSuspendedPeer: "Suspended peer", } type txPool interface { @@ -108,8 +106,8 @@ type txPool interface { SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription } -// statusData is the network packet for the status message. -type statusData struct { +// statusData63 is the network packet for the status message for eth/63. +type statusData63 struct { ProtocolVersion uint32 NetworkId uint64 TD *big.Int @@ -117,6 +115,16 @@ type statusData struct { GenesisBlock common.Hash } +// statusData is the network packet for the status message for eth/64 and later. +type statusData struct { + ProtocolVersion uint32 + NetworkID uint64 + TD *big.Int + Head common.Hash + Genesis common.Hash + ForkID forkid.ID +} + // newBlockHashesData is the network packet for the block announcements. type newBlockHashesData []struct { Hash common.Hash // Hash of one particular block being announced diff --git a/eth/protocol_test.go b/eth/protocol_test.go index 5098790cb..7734848cb 100644 --- a/eth/protocol_test.go +++ b/eth/protocol_test.go @@ -18,15 +18,24 @@ package eth import ( "fmt" + "math/big" "sync" "testing" "time" "github.com/ethersocial/go-ethersocial/common" + "github.com/ethersocial/go-ethersocial/consensus/ethash" + "github.com/ethersocial/go-ethersocial/core" + "github.com/ethersocial/go-ethersocial/core/forkid" + "github.com/ethersocial/go-ethersocial/core/rawdb" "github.com/ethersocial/go-ethersocial/core/types" + "github.com/ethersocial/go-ethersocial/core/vm" "github.com/ethersocial/go-ethersocial/crypto" "github.com/ethersocial/go-ethersocial/eth/downloader" + "github.com/ethersocial/go-ethersocial/event" "github.com/ethersocial/go-ethersocial/p2p" + "github.com/ethersocial/go-ethersocial/p2p/enode" + "github.com/ethersocial/go-ethersocial/params" "github.com/ethersocial/go-ethersocial/rlp" ) @@ -37,10 +46,7 @@ func init() { var testAccount, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") // Tests that handshake failures are detected and reported correctly. 
-func TestStatusMsgErrors62(t *testing.T) { testStatusMsgErrors(t, 62) } -func TestStatusMsgErrors63(t *testing.T) { testStatusMsgErrors(t, 63) } - -func testStatusMsgErrors(t *testing.T, protocol int) { +func TestStatusMsgErrors63(t *testing.T) { pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil) var ( genesis = pm.blockchain.Genesis() @@ -59,21 +65,76 @@ func testStatusMsgErrors(t *testing.T, protocol int) { wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"), }, { - code: StatusMsg, data: statusData{10, DefaultConfig.NetworkId, td, head.Hash(), genesis.Hash()}, - wantError: errResp(ErrProtocolVersionMismatch, "10 (!= %d)", protocol), + code: StatusMsg, data: statusData63{10, DefaultConfig.NetworkId, td, head.Hash(), genesis.Hash()}, + wantError: errResp(ErrProtocolVersionMismatch, "10 (!= %d)", 63), }, { - code: StatusMsg, data: statusData{uint32(protocol), 999, td, head.Hash(), genesis.Hash()}, - wantError: errResp(ErrNetworkIdMismatch, "999 (!= %d)", DefaultConfig.NetworkId), + code: StatusMsg, data: statusData63{63, 999, td, head.Hash(), genesis.Hash()}, + wantError: errResp(ErrNetworkIDMismatch, "999 (!= %d)", DefaultConfig.NetworkId), }, { - code: StatusMsg, data: statusData{uint32(protocol), DefaultConfig.NetworkId, td, head.Hash(), common.Hash{3}}, - wantError: errResp(ErrGenesisBlockMismatch, "0300000000000000 (!= %x)", genesis.Hash().Bytes()[:8]), + code: StatusMsg, data: statusData63{63, DefaultConfig.NetworkId, td, head.Hash(), common.Hash{3}}, + wantError: errResp(ErrGenesisMismatch, "0300000000000000 (!= %x)", genesis.Hash().Bytes()[:8]), }, } + for i, test := range tests { + p, errc := newTestPeer("peer", 63, pm, false) + // The send call might hang until reset because + // the protocol might not read the payload. 
+ go p2p.Send(p.app, test.code, test.data) + + select { + case err := <-errc: + if err == nil { + t.Errorf("test %d: protocol returned nil error, want %q", i, test.wantError) + } else if err.Error() != test.wantError.Error() { + t.Errorf("test %d: wrong error: got %q, want %q", i, err, test.wantError) + } + case <-time.After(2 * time.Second): + t.Errorf("protocol did not shut down within 2 seconds") + } + p.close() + } +} + +func TestStatusMsgErrors64(t *testing.T) { + pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil) + var ( + genesis = pm.blockchain.Genesis() + head = pm.blockchain.CurrentHeader() + td = pm.blockchain.GetTd(head.Hash(), head.Number.Uint64()) + forkID = forkid.NewID(pm.blockchain) + ) + defer pm.Stop() + tests := []struct { + code uint64 + data interface{} + wantError error + }{ + { + code: TxMsg, data: []interface{}{}, + wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"), + }, + { + code: StatusMsg, data: statusData{10, DefaultConfig.NetworkId, td, head.Hash(), genesis.Hash(), forkID}, + wantError: errResp(ErrProtocolVersionMismatch, "10 (!= %d)", 64), + }, + { + code: StatusMsg, data: statusData{64, 999, td, head.Hash(), genesis.Hash(), forkID}, + wantError: errResp(ErrNetworkIDMismatch, "999 (!= %d)", DefaultConfig.NetworkId), + }, + { + code: StatusMsg, data: statusData{64, DefaultConfig.NetworkId, td, head.Hash(), common.Hash{3}, forkID}, + wantError: errResp(ErrGenesisMismatch, "0300000000000000000000000000000000000000000000000000000000000000 (!= %x)", genesis.Hash()), + }, + { + code: StatusMsg, data: statusData{64, DefaultConfig.NetworkId, td, head.Hash(), genesis.Hash(), forkid.ID{Hash: [4]byte{0x00, 0x01, 0x02, 0x03}}}, + wantError: errResp(ErrForkIDRejected, forkid.ErrLocalIncompatibleOrStale.Error()), + }, + } for i, test := range tests { - p, errc := newTestPeer("peer", protocol, pm, false) + p, errc := newTestPeer("peer", 64, pm, false) // The send call might hang until reset because // the protocol might not read the payload. 
go p2p.Send(p.app, test.code, test.data) @@ -92,9 +153,99 @@ func testStatusMsgErrors(t *testing.T, protocol int) { } } +func TestForkIDSplit(t *testing.T) { + var ( + engine = ethash.NewFaker() + + configNoFork = ¶ms.ChainConfig{HomesteadBlock: big.NewInt(1)} + configProFork = ¶ms.ChainConfig{ + HomesteadBlock: big.NewInt(1), + EIP150Block: big.NewInt(2), + EIP155Block: big.NewInt(2), + EIP158Block: big.NewInt(2), + ByzantiumBlock: big.NewInt(3), + } + dbNoFork = rawdb.NewMemoryDatabase() + dbProFork = rawdb.NewMemoryDatabase() + + gspecNoFork = &core.Genesis{Config: configNoFork} + gspecProFork = &core.Genesis{Config: configProFork} + + genesisNoFork = gspecNoFork.MustCommit(dbNoFork) + genesisProFork = gspecProFork.MustCommit(dbProFork) + + chainNoFork, _ = core.NewBlockChain(dbNoFork, nil, configNoFork, engine, vm.Config{}, nil) + chainProFork, _ = core.NewBlockChain(dbProFork, nil, configProFork, engine, vm.Config{}, nil) + + blocksNoFork, _ = core.GenerateChain(configNoFork, genesisNoFork, engine, dbNoFork, 2, nil) + blocksProFork, _ = core.GenerateChain(configProFork, genesisProFork, engine, dbProFork, 2, nil) + + ethNoFork, _ = NewProtocolManager(configNoFork, nil, downloader.FullSync, 1, new(event.TypeMux), new(testTxPool), engine, chainNoFork, dbNoFork, 1, nil) + ethProFork, _ = NewProtocolManager(configProFork, nil, downloader.FullSync, 1, new(event.TypeMux), new(testTxPool), engine, chainProFork, dbProFork, 1, nil) + ) + ethNoFork.Start(1000) + ethProFork.Start(1000) + + // Both nodes should allow the other to connect (same genesis, next fork is the same) + p2pNoFork, p2pProFork := p2p.MsgPipe() + peerNoFork := newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork) + peerProFork := newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork) + + errc := make(chan error, 2) + go func() { errc <- ethNoFork.handle(peerProFork) }() + go func() { errc <- ethProFork.handle(peerNoFork) }() + + select { + case err := <-errc: + t.Fatalf("frontier nofork <-> profork failed: %v", err) + case <-time.After(250 * time.Millisecond): + p2pNoFork.Close() + p2pProFork.Close() + } + // Progress into Homestead. Fork's match, so we don't care what the future holds + chainNoFork.InsertChain(blocksNoFork[:1]) + chainProFork.InsertChain(blocksProFork[:1]) + + p2pNoFork, p2pProFork = p2p.MsgPipe() + peerNoFork = newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork) + peerProFork = newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork) + + errc = make(chan error, 2) + go func() { errc <- ethNoFork.handle(peerProFork) }() + go func() { errc <- ethProFork.handle(peerNoFork) }() + + select { + case err := <-errc: + t.Fatalf("homestead nofork <-> profork failed: %v", err) + case <-time.After(250 * time.Millisecond): + p2pNoFork.Close() + p2pProFork.Close() + } + // Progress into Spurious. 
Forks mismatch, signalling differing chains, reject + chainNoFork.InsertChain(blocksNoFork[1:2]) + chainProFork.InsertChain(blocksProFork[1:2]) + + p2pNoFork, p2pProFork = p2p.MsgPipe() + peerNoFork = newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork) + peerProFork = newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork) + + errc = make(chan error, 2) + go func() { errc <- ethNoFork.handle(peerProFork) }() + go func() { errc <- ethProFork.handle(peerNoFork) }() + + select { + case err := <-errc: + if want := errResp(ErrForkIDRejected, forkid.ErrLocalIncompatibleOrStale.Error()); err.Error() != want.Error() { + t.Fatalf("fork ID rejection error mismatch: have %v, want %v", err, want) + } + case <-time.After(250 * time.Millisecond): + t.Fatalf("split peers not rejected") + } +} + // This test checks that received transactions are added to the local pool. -func TestRecvTransactions62(t *testing.T) { testRecvTransactions(t, 62) } func TestRecvTransactions63(t *testing.T) { testRecvTransactions(t, 63) } +func TestRecvTransactions64(t *testing.T) { testRecvTransactions(t, 64) } func testRecvTransactions(t *testing.T, protocol int) { txAdded := make(chan []*types.Transaction) @@ -121,8 +272,8 @@ func testRecvTransactions(t *testing.T, protocol int) { } // This test checks that pending transactions are sent. -func TestSendTransactions62(t *testing.T) { testSendTransactions(t, 62) } func TestSendTransactions63(t *testing.T) { testSendTransactions(t, 63) } +func TestSendTransactions64(t *testing.T) { testSendTransactions(t, 64) } func testSendTransactions(t *testing.T, protocol int) { pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil) diff --git a/eth/tracers/tracer.go b/eth/tracers/tracer.go index 8af2539df..2db216fab 100644 --- a/eth/tracers/tracer.go +++ b/eth/tracers/tracer.go @@ -99,7 +99,7 @@ func (mw *memoryWrapper) slice(begin, end int64) []byte { log.Warn("Tracer accessed out of bound memory", "available", mw.memory.Len(), "offset", begin, "size", end-begin) return nil } - return mw.memory.Get(begin, end-begin) + return mw.memory.GetCopy(begin, end-begin) } // getUint returns the 32 bytes at the specified address interpreted as a uint. 
diff --git a/internal/build/archive.go b/internal/build/archive.go index ac680ba63..8571edd5a 100644 --- a/internal/build/archive.go +++ b/internal/build/archive.go @@ -183,3 +183,49 @@ func (a *TarballArchive) Close() error { } return a.file.Close() } + +func ExtractTarballArchive(archive string, dest string) error { + // We're only interested in gzipped archives, wrap the reader now + ar, err := os.Open(archive) + if err != nil { + return err + } + defer ar.Close() + + gzr, err := gzip.NewReader(ar) + if err != nil { + return err + } + defer gzr.Close() + + // Iterate over all the files in the tarball + tr := tar.NewReader(gzr) + for { + // Fetch the next tarball header and abort if needed + header, err := tr.Next() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + // Figure out the target and create it + target := filepath.Join(dest, header.Name) + + switch header.Typeflag { + case tar.TypeDir: + if err := os.MkdirAll(target, 0755); err != nil { + return err + } + case tar.TypeReg: + file, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) + if err != nil { + return err + } + if _, err := io.Copy(file, tr); err != nil { + return err + } + file.Close() + } + } +} diff --git a/internal/build/gosrc.go b/internal/build/gosrc.go new file mode 100644 index 000000000..c85e46968 --- /dev/null +++ b/internal/build/gosrc.go @@ -0,0 +1,81 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package build + +import ( + "bytes" + "crypto/sha256" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "strings" +) + +// EnsureGoSources ensures that path contains a file with the given SHA256 hash, +// and if not, it downloads a fresh Go source package from upstream and replaces +// path with it (if the hash matches). 
+func EnsureGoSources(version string, hash []byte, path string) error { + // Sanity check the destination path to ensure we don't do weird things + if !strings.HasSuffix(path, ".tar.gz") { + return fmt.Errorf("destination path (%s) must end with .tar.gz", path) + } + // If the file exists, validate it's hash + if archive, err := ioutil.ReadFile(path); err == nil { // Go sources are ~20MB, it's fine to read all + hasher := sha256.New() + hasher.Write(archive) + have := hasher.Sum(nil) + + if bytes.Equal(have, hash) { + fmt.Printf("Go %s [%x] available at %s\n", version, hash, path) + return nil + } + fmt.Printf("Go %s hash mismatch (have %x, want %x) at %s, deleting old archive\n", version, have, hash, path) + if err := os.Remove(path); err != nil { + return err + } + } + // Archive missing or bad hash, download a new one + fmt.Printf("Downloading Go %s [want %x] into %s\n", version, hash, path) + + res, err := http.Get(fmt.Sprintf("https://dl.google.com/go/go%s.src.tar.gz", version)) + if err != nil || res.StatusCode != http.StatusOK { + return fmt.Errorf("failed to access Go sources: code %d, err %v", res.StatusCode, err) + } + defer res.Body.Close() + + archive, err := ioutil.ReadAll(res.Body) + if err != nil { + return err + } + // Sanity check the downloaded archive, save if checks out + hasher := sha256.New() + hasher.Write(archive) + + if have := hasher.Sum(nil); !bytes.Equal(have, hash) { + return fmt.Errorf("downloaded Go %s hash mismatch (have %x, want %x)", version, have, hash) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + if err := ioutil.WriteFile(path, archive, 0644); err != nil { + return err + } + fmt.Printf("Downloaded Go %s [%x] into %s\n", version, hash, path) + return nil +} diff --git a/les/balance.go b/les/balance.go index aefdf3c73..08b2614c2 100644 --- a/les/balance.go +++ b/les/balance.go @@ -67,7 +67,7 @@ type balanceCallback struct { // init initializes balanceTracker func (bt *balanceTracker) init(clock mclock.Clock, capacity uint64) { bt.clock = clock - bt.initTime = clock.Now() + bt.initTime, bt.lastUpdate = clock.Now(), clock.Now() // Init timestamps for i := range bt.callbackIndex { bt.callbackIndex[i] = -1 } diff --git a/les/balance_test.go b/les/balance_test.go new file mode 100644 index 000000000..3469275a5 --- /dev/null +++ b/les/balance_test.go @@ -0,0 +1,260 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package les + +import ( + "testing" + "time" + + "github.com/ethersocial/go-ethersocial/common/mclock" +) + +func TestSetBalance(t *testing.T) { + var clock = &mclock.Simulated{} + var inputs = []struct { + pos uint64 + neg uint64 + }{ + {1000, 0}, + {0, 1000}, + {1000, 1000}, + } + + tracker := balanceTracker{} + tracker.init(clock, 1000) + defer tracker.stop(clock.Now()) + + for _, i := range inputs { + tracker.setBalance(i.pos, i.neg) + pos, neg := tracker.getBalance(clock.Now()) + if pos != i.pos { + t.Fatalf("Positive balance mismatch, want %v, got %v", i.pos, pos) + } + if neg != i.neg { + t.Fatalf("Negative balance mismatch, want %v, got %v", i.neg, neg) + } + } +} + +func TestBalanceTimeCost(t *testing.T) { + var ( + clock = &mclock.Simulated{} + tracker = balanceTracker{} + ) + tracker.init(clock, 1000) + defer tracker.stop(clock.Now()) + tracker.setFactors(false, 1, 1) + tracker.setFactors(true, 1, 1) + + tracker.setBalance(uint64(time.Minute), 0) // 1 minute time allowance + + var inputs = []struct { + runTime time.Duration + expPos uint64 + expNeg uint64 + }{ + {time.Second, uint64(time.Second * 59), 0}, + {0, uint64(time.Second * 59), 0}, + {time.Second * 59, 0, 0}, + {time.Second, 0, uint64(time.Second)}, + } + for _, i := range inputs { + clock.Run(i.runTime) + if pos, _ := tracker.getBalance(clock.Now()); pos != i.expPos { + t.Fatalf("Positive balance mismatch, want %v, got %v", i.expPos, pos) + } + if _, neg := tracker.getBalance(clock.Now()); neg != i.expNeg { + t.Fatalf("Negative balance mismatch, want %v, got %v", i.expNeg, neg) + } + } + + tracker.setBalance(uint64(time.Minute), 0) // Refill 1 minute time allowance + for _, i := range inputs { + clock.Run(i.runTime) + if pos, _ := tracker.getBalance(clock.Now()); pos != i.expPos { + t.Fatalf("Positive balance mismatch, want %v, got %v", i.expPos, pos) + } + if _, neg := tracker.getBalance(clock.Now()); neg != i.expNeg { + t.Fatalf("Negative balance mismatch, want %v, got %v", i.expNeg, neg) + } + } +} + +func TestBalanceReqCost(t *testing.T) { + var ( + clock = &mclock.Simulated{} + tracker = balanceTracker{} + ) + tracker.init(clock, 1000) + defer tracker.stop(clock.Now()) + tracker.setFactors(false, 1, 1) + tracker.setFactors(true, 1, 1) + + tracker.setBalance(uint64(time.Minute), 0) // 1 minute time serving time allowance + var inputs = []struct { + reqCost uint64 + expPos uint64 + expNeg uint64 + }{ + {uint64(time.Second), uint64(time.Second * 59), 0}, + {0, uint64(time.Second * 59), 0}, + {uint64(time.Second * 59), 0, 0}, + {uint64(time.Second), 0, uint64(time.Second)}, + } + for _, i := range inputs { + tracker.requestCost(i.reqCost) + if pos, _ := tracker.getBalance(clock.Now()); pos != i.expPos { + t.Fatalf("Positive balance mismatch, want %v, got %v", i.expPos, pos) + } + if _, neg := tracker.getBalance(clock.Now()); neg != i.expNeg { + t.Fatalf("Negative balance mismatch, want %v, got %v", i.expNeg, neg) + } + } +} + +func TestBalanceToPriority(t *testing.T) { + var ( + clock = &mclock.Simulated{} + tracker = balanceTracker{} + ) + tracker.init(clock, 1000) // cap = 1000 + defer tracker.stop(clock.Now()) + tracker.setFactors(false, 1, 1) + tracker.setFactors(true, 1, 1) + + var inputs = []struct { + pos uint64 + neg uint64 + priority int64 + }{ + {1000, 0, ^int64(1)}, + {2000, 0, ^int64(2)}, // Higher balance, lower priority value + {0, 0, 0}, + {0, 1000, 1000}, + } + for _, i := range inputs { + tracker.setBalance(i.pos, i.neg) + priority := tracker.getPriority(clock.Now()) + if priority != i.priority { + 
t.Fatalf("Priority mismatch, want %v, got %v", i.priority, priority) + } + } +} + +func TestEstimatedPriority(t *testing.T) { + var ( + clock = &mclock.Simulated{} + tracker = balanceTracker{} + ) + tracker.init(clock, 1000000000) // cap = 1000,000,000 + defer tracker.stop(clock.Now()) + tracker.setFactors(false, 1, 1) + tracker.setFactors(true, 1, 1) + + tracker.setBalance(uint64(time.Minute), 0) + var inputs = []struct { + runTime time.Duration // time cost + futureTime time.Duration // diff of future time + reqCost uint64 // single request cost + priority int64 // expected estimated priority + }{ + {time.Second, time.Second, 0, ^int64(58)}, + {0, time.Second, 0, ^int64(58)}, + + // 2 seconds time cost, 1 second estimated time cost, 10^9 request cost, + // 10^9 estimated request cost per second. + {time.Second, time.Second, 1000000000, ^int64(55)}, + + // 3 seconds time cost, 3 second estimated time cost, 10^9*2 request cost, + // 4*10^9 estimated request cost. + {time.Second, 3 * time.Second, 1000000000, ^int64(48)}, + + // All positive balance is used up + {time.Second * 55, 0, 0, 0}, + + // 1 minute estimated time cost, 4/58 * 10^9 estimated request cost per sec. + {0, time.Minute, 0, int64(time.Minute) + int64(time.Second)*120/29}, + } + for _, i := range inputs { + clock.Run(i.runTime) + tracker.requestCost(i.reqCost) + priority := tracker.estimatedPriority(clock.Now()+mclock.AbsTime(i.futureTime), true) + if priority != i.priority { + t.Fatalf("Estimated priority mismatch, want %v, got %v", i.priority, priority) + } + } +} + +func TestCallbackChecking(t *testing.T) { + var ( + clock = &mclock.Simulated{} + tracker = balanceTracker{} + ) + tracker.init(clock, 1000000) // cap = 1000,000 + defer tracker.stop(clock.Now()) + tracker.setFactors(false, 1, 1) + tracker.setFactors(true, 1, 1) + + var inputs = []struct { + priority int64 + expDiff time.Duration + }{ + {^int64(500), time.Millisecond * 500}, + {0, time.Second}, + {int64(time.Second), 2 * time.Second}, + } + tracker.setBalance(uint64(time.Second), 0) + for _, i := range inputs { + diff, _ := tracker.timeUntil(i.priority) + if diff != i.expDiff { + t.Fatalf("Time difference mismatch, want %v, got %v", i.expDiff, diff) + } + } +} + +func TestCallback(t *testing.T) { + var ( + clock = &mclock.Simulated{} + tracker = balanceTracker{} + ) + tracker.init(clock, 1000) // cap = 1000 + defer tracker.stop(clock.Now()) + tracker.setFactors(false, 1, 1) + tracker.setFactors(true, 1, 1) + + callCh := make(chan struct{}, 1) + tracker.setBalance(uint64(time.Minute), 0) + tracker.addCallback(balanceCallbackZero, 0, func() { callCh <- struct{}{} }) + + clock.Run(time.Minute) + select { + case <-callCh: + case <-time.NewTimer(time.Second).C: + t.Fatalf("Callback hasn't been called yet") + } + + tracker.setBalance(uint64(time.Minute), 0) + tracker.addCallback(balanceCallbackZero, 0, func() { callCh <- struct{}{} }) + tracker.removeCallback(balanceCallbackZero) + + clock.Run(time.Minute) + select { + case <-callCh: + t.Fatalf("Callback shouldn't be called") + case <-time.NewTimer(time.Millisecond * 100).C: + } +} diff --git a/les/clientpool.go b/les/clientpool.go index efba2bf6b..74cbc8f70 100644 --- a/les/clientpool.go +++ b/les/clientpool.go @@ -17,67 +17,81 @@ package les import ( + "encoding/binary" "io" "math" "sync" "time" + "github.com/ethersocial/go-ethersocial/common" "github.com/ethersocial/go-ethersocial/common/mclock" "github.com/ethersocial/go-ethersocial/common/prque" "github.com/ethersocial/go-ethersocial/ethdb" 
"github.com/ethersocial/go-ethersocial/log" "github.com/ethersocial/go-ethersocial/p2p/enode" "github.com/ethersocial/go-ethersocial/rlp" + "github.com/hashicorp/golang-lru" ) const ( - negBalanceExpTC = time.Hour // time constant for exponentially reducing negative balance - fixedPointMultiplier = 0x1000000 // constant to convert logarithms to fixed point format - connectedBias = time.Minute * 5 // this bias is applied in favor of already connected clients in order to avoid kicking them out very soon - lazyQueueRefresh = time.Second * 10 // refresh period of the connected queue -) - -var ( - clientPoolDbKey = []byte("clientPool") - clientBalanceDbKey = []byte("clientPool-balance") + negBalanceExpTC = time.Hour // time constant for exponentially reducing negative balance + fixedPointMultiplier = 0x1000000 // constant to convert logarithms to fixed point format + lazyQueueRefresh = time.Second * 10 // refresh period of the connected queue + persistCumulativeTimeRefresh = time.Minute * 5 // refresh period of the cumulative running time persistence + posBalanceCacheLimit = 8192 // the maximum number of cached items in positive balance queue + negBalanceCacheLimit = 8192 // the maximum number of cached items in negative balance queue + + // connectedBias is applied to already connected clients So that + // already connected client won't be kicked out very soon and we + // can ensure all connected clients can have enough time to request + // or sync some data. + // + // todo(rjl493456442) make it configurable. It can be the option of + // free trial time! + connectedBias = time.Minute * 3 ) // clientPool implements a client database that assigns a priority to each client // based on a positive and negative balance. Positive balance is externally assigned // to prioritized clients and is decreased with connection time and processed // requests (unless the price factors are zero). If the positive balance is zero -// then negative balance is accumulated. Balance tracking and priority calculation -// for connected clients is done by balanceTracker. connectedQueue ensures that -// clients with the lowest positive or highest negative balance get evicted when -// the total capacity allowance is full and new clients with a better balance want -// to connect. Already connected nodes receive a small bias in their favor in order -// to avoid accepting and instantly kicking out clients. -// Balances of disconnected clients are stored in posBalanceQueue and negBalanceQueue -// and are also saved in the database. Negative balance is transformed into a -// logarithmic form with a constantly shifting linear offset in order to implement -// an exponential decrease. negBalanceQueue has a limited size and drops the smallest -// values when necessary. Positive balances are stored in the database as long as -// they exist, posBalanceQueue only acts as a cache for recently accessed entries. +// then negative balance is accumulated. +// +// Balance tracking and priority calculation for connected clients is done by +// balanceTracker. connectedQueue ensures that clients with the lowest positive or +// highest negative balance get evicted when the total capacity allowance is full +// and new clients with a better balance want to connect. +// +// Already connected nodes receive a small bias in their favor in order to avoid +// accepting and instantly kicking out clients. In theory, we try to ensure that +// each client can have several minutes of connection time. 
+// +// Balances of disconnected clients are stored in nodeDB including positive balance +// and negative balance. Negative balance is transformed into a logarithmic form +// with a constantly shifting linear offset in order to implement an exponential +// decrease. Besides, nodeDB has a background thread to check the negative +// balance of disconnected clients. If the balance is low enough, then the record +// will be dropped. type clientPool struct { - db ethdb.Database + ndb *nodeDB lock sync.Mutex clock mclock.Clock - stopCh chan chan struct{} + stopCh chan struct{} closed bool removePeer func(enode.ID) - queueLimit, countLimit int - freeClientCap, capacityLimit, connectedCapacity uint64 + connectedMap map[enode.ID]*clientInfo + connectedQueue *prque.LazyQueue + + posFactors, negFactors priceFactors - connectedMap map[enode.ID]*clientInfo - posBalanceMap map[enode.ID]*posBalance - negBalanceMap map[string]*negBalance - connectedQueue *prque.LazyQueue - posBalanceQueue, negBalanceQueue *prque.Prque - posFactors, negFactors priceFactors - posBalanceAccessCounter int64 - startupTime mclock.AbsTime - logOffsetAtStartup int64 + connLimit int // The maximum number of connections that clientpool can support + capLimit uint64 // The maximum cumulative capacity that clientpool can support + connectedCap uint64 // The sum of the capacity of the current clientpool connected + freeClientCap uint64 // The capacity value of each free client + startTime mclock.AbsTime // The timestamp at which the clientpool started running + cumulativeTime int64 // The cumulative running time of clientpool at the start point. + disableBias bool // Disable connection bias (used in testing) } // clientPeer represents a client in the pool. @@ -138,22 +152,25 @@ type priceFactors struct { } // newClientPool creates a new client pool -func newClientPool(db ethdb.Database, freeClientCap uint64, queueLimit int, clock mclock.Clock, removePeer func(enode.ID)) *clientPool { +func newClientPool(db ethdb.Database, freeClientCap uint64, clock mclock.Clock, removePeer func(enode.ID)) *clientPool { + ndb := newNodeDB(db, clock) pool := &clientPool{ - db: db, - clock: clock, - connectedMap: make(map[enode.ID]*clientInfo), - posBalanceMap: make(map[enode.ID]*posBalance), - negBalanceMap: make(map[string]*negBalance), - connectedQueue: prque.NewLazyQueue(connSetIndex, connPriority, connMaxPriority, clock, lazyQueueRefresh), - negBalanceQueue: prque.New(negSetIndex), - posBalanceQueue: prque.New(posSetIndex), - freeClientCap: freeClientCap, - queueLimit: queueLimit, - removePeer: removePeer, - stopCh: make(chan chan struct{}), - } - pool.loadFromDb() + ndb: ndb, + clock: clock, + connectedMap: make(map[enode.ID]*clientInfo), + connectedQueue: prque.NewLazyQueue(connSetIndex, connPriority, connMaxPriority, clock, lazyQueueRefresh), + freeClientCap: freeClientCap, + removePeer: removePeer, + startTime: clock.Now(), + cumulativeTime: ndb.getCumulativeTime(), + stopCh: make(chan struct{}), + } + // If the negative balance of a free client is even lower than 1, + // delete this entry. 
+ ndb.nbEvictCallBack = func(now mclock.AbsTime, b negBalance) bool { + balance := math.Exp(float64(b.logValue-pool.logOffset(now)) / fixedPointMultiplier) + return balance <= 1 + } go func() { for { select { @@ -161,8 +178,9 @@ func newClientPool(db ethdb.Database, freeClientCap uint64, queueLimit int, cloc pool.lock.Lock() pool.connectedQueue.Refresh() pool.lock.Unlock() - case stop := <-pool.stopCh: - close(stop) + case <-clock.After(persistCumulativeTimeRefresh): + pool.ndb.setCumulativeTime(pool.logOffset(clock.Now())) + case <-pool.stopCh: return } } @@ -172,13 +190,12 @@ func newClientPool(db ethdb.Database, freeClientCap uint64, queueLimit int, cloc // stop shuts the client pool down func (f *clientPool) stop() { - stop := make(chan struct{}) - f.stopCh <- stop - <-stop + close(f.stopCh) f.lock.Lock() f.closed = true - f.saveToDb() f.lock.Unlock() + f.ndb.setCumulativeTime(f.logOffset(f.clock.Now())) + f.ndb.close() } // connect should be called after a successful handshake. If the connection was @@ -187,7 +204,7 @@ func (f *clientPool) connect(peer clientPeer, capacity uint64) bool { f.lock.Lock() defer f.lock.Unlock() - // Short circuit is clientPool is already closed. + // Short circuit if clientPool is already closed. if f.closed { return false } @@ -199,14 +216,19 @@ func (f *clientPool) connect(peer clientPeer, capacity uint64) bool { return false } // Create a clientInfo but do not add it yet - now := f.clock.Now() - posBalance := f.getPosBalance(id).value + var ( + posBalance uint64 + negBalance uint64 + now = f.clock.Now() + ) + pb := f.ndb.getOrNewPB(id) + posBalance = pb.value e := &clientInfo{pool: f, peer: peer, address: freeID, queueIndex: -1, id: id, priority: posBalance != 0} - var negBalance uint64 - nb := f.negBalanceMap[freeID] - if nb != nil { + nb := f.ndb.getOrNewNB(freeID) + if nb.logValue != 0 { negBalance = uint64(math.Exp(float64(nb.logValue-f.logOffset(now)) / fixedPointMultiplier)) + negBalance *= uint64(time.Second) } // If the client is a free client, assign with a low free capacity, // Otherwise assign with the given value(priority client) @@ -219,6 +241,7 @@ func (f *clientPool) connect(peer clientPeer, capacity uint64) bool { } e.capacity = capacity + // Starts a balance tracker e.balanceTracker.init(f.clock, capacity) e.balanceTracker.setBalance(posBalance, negBalance) f.setClientPriceFactors(e) @@ -228,9 +251,9 @@ func (f *clientPool) connect(peer clientPeer, capacity uint64) bool { // // If the priority of the newly added client is lower than the priority of // all connected clients, the client is rejected. 
- newCapacity := f.connectedCapacity + capacity + newCapacity := f.connectedCap + capacity newCount := f.connectedQueue.Size() + 1 - if newCapacity > f.capacityLimit || newCount > f.countLimit { + if newCapacity > f.capLimit || newCount > f.connLimit { var ( kickList []*clientInfo kickPriority int64 @@ -241,10 +264,13 @@ func (f *clientPool) connect(peer clientPeer, capacity uint64) bool { kickPriority = priority newCapacity -= c.capacity newCount-- - return newCapacity > f.capacityLimit || newCount > f.countLimit + return newCapacity > f.capLimit || newCount > f.connLimit }) - if newCapacity > f.capacityLimit || newCount > f.countLimit || (e.balanceTracker.estimatedPriority(now+mclock.AbsTime(connectedBias), false)-kickPriority) > 0 { - // reject client + bias := connectedBias + if f.disableBias { + bias = 0 + } + if newCapacity > f.capLimit || newCount > f.connLimit || (e.balanceTracker.estimatedPriority(now+mclock.AbsTime(bias), false)-kickPriority) > 0 { for _, c := range kickList { f.connectedQueue.Push(c) } @@ -257,21 +283,22 @@ func (f *clientPool) connect(peer clientPeer, capacity uint64) bool { f.dropClient(c, now, true) } } - // client accepted, finish setting it up - if nb != nil { - delete(f.negBalanceMap, freeID) - f.negBalanceQueue.Remove(nb.queueIndex) - } + // Register new client to connection queue. + f.connectedMap[id] = e + f.connectedQueue.Push(e) + f.connectedCap += e.capacity + + // If the current client is a paid client, monitor the status of client, + // downgrade it to normal client if positive balance is used up. if e.priority { e.balanceTracker.addCallback(balanceCallbackZero, 0, func() { f.balanceExhausted(id) }) } - f.connectedMap[id] = e - f.connectedQueue.Push(e) - f.connectedCapacity += e.capacity - totalConnectedGauge.Update(int64(f.connectedCapacity)) + // If the capacity of client is not the default value(free capacity), notify + // it to update capacity. if e.capacity != f.freeClientCap { e.peer.updateCapacity(e.capacity) } + totalConnectedGauge.Update(int64(f.connectedCap)) clientConnectedMeter.Mark(1) log.Debug("Client accepted", "address", freeID) return true @@ -284,15 +311,14 @@ func (f *clientPool) disconnect(p clientPeer) { f.lock.Lock() defer f.lock.Unlock() + // Short circuit if client pool is already closed. if f.closed { return } - address := p.freeClientId() - id := p.ID() // Short circuit if the peer hasn't been registered. 
- e := f.connectedMap[id] + e := f.connectedMap[p.ID()] if e == nil { - log.Debug("Client not connected", "address", address, "id", peerIdToString(id)) + log.Debug("Client not connected", "address", p.freeClientId(), "id", peerIdToString(p.ID())) return } f.dropClient(e, f.clock.Now(), false) @@ -307,8 +333,8 @@ func (f *clientPool) dropClient(e *clientInfo, now mclock.AbsTime, kick bool) { f.finalizeBalance(e, now) f.connectedQueue.Remove(e.queueIndex) delete(f.connectedMap, e.id) - f.connectedCapacity -= e.capacity - totalConnectedGauge.Update(int64(f.connectedCapacity)) + f.connectedCap -= e.capacity + totalConnectedGauge.Update(int64(f.connectedCap)) if kick { clientKickedMeter.Mark(1) log.Debug("Client kicked out", "address", e.address) @@ -324,18 +350,17 @@ func (f *clientPool) dropClient(e *clientInfo, now mclock.AbsTime, kick bool) { func (f *clientPool) finalizeBalance(c *clientInfo, now mclock.AbsTime) { c.balanceTracker.stop(now) pos, neg := c.balanceTracker.getBalance(now) - pb := f.getPosBalance(c.id) + + pb, nb := f.ndb.getOrNewPB(c.id), f.ndb.getOrNewNB(c.address) pb.value = pos - f.storePosBalance(pb) - if neg < 1 { - neg = 1 - } - nb := &negBalance{address: c.address, queueIndex: -1, logValue: int64(math.Log(float64(neg))*fixedPointMultiplier) + f.logOffset(now)} - f.negBalanceMap[c.address] = nb - f.negBalanceQueue.Push(nb, -nb.logValue) - if f.negBalanceQueue.Size() > f.queueLimit { - nn := f.negBalanceQueue.PopItem().(*negBalance) - delete(f.negBalanceMap, nn.address) + f.ndb.setPB(c.id, pb) + + neg /= uint64(time.Second) // Convert the expanse to second level. + if neg > 1 { + nb.logValue = int64(math.Log(float64(neg))*fixedPointMultiplier) + f.logOffset(now) + f.ndb.setNB(c.address, nb) + } else { + f.ndb.delNB(c.address) // Negative balance is small enough, drop it directly. } } @@ -351,27 +376,26 @@ func (f *clientPool) balanceExhausted(id enode.ID) { } c.priority = false if c.capacity != f.freeClientCap { - f.connectedCapacity += f.freeClientCap - c.capacity - totalConnectedGauge.Update(int64(f.connectedCapacity)) + f.connectedCap += f.freeClientCap - c.capacity + totalConnectedGauge.Update(int64(f.connectedCap)) c.capacity = f.freeClientCap c.peer.updateCapacity(c.capacity) } + f.ndb.delPB(id) } // setConnLimit sets the maximum number and total capacity of connected clients, // dropping some of them if necessary. -func (f *clientPool) setLimits(count int, totalCap uint64) { +func (f *clientPool) setLimits(totalConn int, totalCap uint64) { f.lock.Lock() defer f.lock.Unlock() - f.countLimit = count - f.capacityLimit = totalCap - if f.connectedCapacity > f.capacityLimit || f.connectedQueue.Size() > f.countLimit { - now := mclock.Now() + f.connLimit = totalConn + f.capLimit = totalCap + if f.connectedCap > f.capLimit || f.connectedQueue.Size() > f.connLimit { f.connectedQueue.MultiPop(func(data interface{}, priority int64) bool { - c := data.(*clientInfo) - f.dropClient(c, now, true) - return f.connectedCapacity > f.capacityLimit || f.connectedQueue.Size() > f.countLimit + f.dropClient(data.(*clientInfo), mclock.Now(), true) + return f.connectedCap > f.capLimit || f.connectedQueue.Size() > f.connLimit }) } } @@ -390,11 +414,14 @@ func (f *clientPool) requestCost(p *peer, cost uint64) { // logOffset calculates the time-dependent offset for the logarithmic // representation of negative balance +// +// From another point of view, the result returned by the function represents +// the total time that the clientpool is cumulatively running(total_hours/multiplier). 
func (f *clientPool) logOffset(now mclock.AbsTime) int64 { // Note: fixedPointMultiplier acts as a multiplier here; the reason for dividing the divisor // is to avoid int64 overflow. We assume that int64(negBalanceExpTC) >> fixedPointMultiplier. - logDecay := int64((time.Duration(now - f.startupTime)) / (negBalanceExpTC / fixedPointMultiplier)) - return f.logOffsetAtStartup + logDecay + cumulativeTime := int64((time.Duration(now - f.startTime)) / (negBalanceExpTC / fixedPointMultiplier)) + return f.cumulativeTime + cumulativeTime } // setPriceFactors changes pricing factors for both positive and negative balances. @@ -415,100 +442,6 @@ func (f *clientPool) setClientPriceFactors(c *clientInfo) { c.balanceTracker.setFactors(false, f.posFactors.timeFactor+float64(c.capacity)*f.posFactors.capacityFactor/1000000, f.posFactors.requestFactor) } -// clientPoolStorage is the RLP representation of the pool's database storage -type clientPoolStorage struct { - LogOffset uint64 - List []*negBalance -} - -// loadFromDb restores pool status from the database storage -// (automatically called at initialization) -func (f *clientPool) loadFromDb() { - enc, err := f.db.Get(clientPoolDbKey) - if err != nil { - return - } - var storage clientPoolStorage - err = rlp.DecodeBytes(enc, &storage) - if err != nil { - log.Error("Failed to decode client list", "err", err) - return - } - f.logOffsetAtStartup = int64(storage.LogOffset) - f.startupTime = f.clock.Now() - for _, e := range storage.List { - log.Debug("Loaded free client record", "address", e.address, "logValue", e.logValue) - f.negBalanceMap[e.address] = e - f.negBalanceQueue.Push(e, -e.logValue) - } -} - -// saveToDb saves pool status to the database storage -// (automatically called during shutdown) -func (f *clientPool) saveToDb() { - now := f.clock.Now() - storage := clientPoolStorage{ - LogOffset: uint64(f.logOffset(now)), - } - for _, c := range f.connectedMap { - f.finalizeBalance(c, now) - } - i := 0 - storage.List = make([]*negBalance, len(f.negBalanceMap)) - for _, e := range f.negBalanceMap { - storage.List[i] = e - i++ - } - enc, err := rlp.EncodeToBytes(storage) - if err != nil { - log.Error("Failed to encode negative balance list", "err", err) - } else { - f.db.Put(clientPoolDbKey, enc) - } -} - -// storePosBalance stores a single positive balance entry in the database -func (f *clientPool) storePosBalance(b *posBalance) { - if b.value == b.lastStored { - return - } - enc, err := rlp.EncodeToBytes(b) - if err != nil { - log.Error("Failed to encode client balance", "err", err) - } else { - f.db.Put(append(clientBalanceDbKey, b.id[:]...), enc) - b.lastStored = b.value - } -} - -// getPosBalance retrieves a single positive balance entry from cache or the database -func (f *clientPool) getPosBalance(id enode.ID) *posBalance { - if b, ok := f.posBalanceMap[id]; ok { - f.posBalanceQueue.Remove(b.queueIndex) - f.posBalanceAccessCounter-- - f.posBalanceQueue.Push(b, f.posBalanceAccessCounter) - return b - } - balance := &posBalance{} - if enc, err := f.db.Get(append(clientBalanceDbKey, id[:]...)); err == nil { - if err := rlp.DecodeBytes(enc, balance); err != nil { - log.Error("Failed to decode client balance", "err", err) - balance = &posBalance{} - } - } - balance.id = id - balance.queueIndex = -1 - if f.posBalanceQueue.Size() >= f.queueLimit { - b := f.posBalanceQueue.PopItem().(*posBalance) - f.storePosBalance(b) - delete(f.posBalanceMap, b.id) - } - f.posBalanceAccessCounter-- - f.posBalanceQueue.Push(balance, f.posBalanceAccessCounter) - 
f.posBalanceMap[id] = balance - return balance -} - // addBalance updates the positive balance of a client. // If setTotal is false then the given amount is added to the balance. // If setTotal is true then amount represents the total amount ever added to the @@ -518,11 +451,21 @@ func (f *clientPool) addBalance(id enode.ID, amount uint64, setTotal bool) { f.lock.Lock() defer f.lock.Unlock() - pb := f.getPosBalance(id) + pb := f.ndb.getOrNewPB(id) c := f.connectedMap[id] - var negBalance uint64 if c != nil { - pb.value, negBalance = c.balanceTracker.getBalance(f.clock.Now()) + posBalance, negBalance := c.balanceTracker.getBalance(f.clock.Now()) + pb.value = posBalance + defer func() { + c.balanceTracker.setBalance(pb.value, negBalance) + if !c.priority && pb.value > 0 { + // The capacity should be adjusted based on the requirement, + // but we have no idea about the new capacity, need a second + // call to udpate it. + c.priority = true + c.balanceTracker.addCallback(balanceCallbackZero, 0, func() { f.balanceExhausted(id) }) + } + }() } if setTotal { if pb.value+amount > pb.lastTotal { @@ -535,21 +478,12 @@ func (f *clientPool) addBalance(id enode.ID, amount uint64, setTotal bool) { pb.value += amount pb.lastTotal += amount } - f.storePosBalance(pb) - if c != nil { - c.balanceTracker.setBalance(pb.value, negBalance) - if !c.priority && pb.value > 0 { - c.priority = true - c.balanceTracker.addCallback(balanceCallbackZero, 0, func() { f.balanceExhausted(id) }) - } - } + f.ndb.setPB(id, pb) } // posBalance represents a recently accessed positive balance entry type posBalance struct { - id enode.ID - value, lastStored, lastTotal uint64 - queueIndex int // position in posBalanceQueue + value, lastTotal uint64 } // EncodeRLP implements rlp.Encoder @@ -566,44 +500,207 @@ func (e *posBalance) DecodeRLP(s *rlp.Stream) error { return err } e.value = entry.Value - e.lastStored = entry.Value e.lastTotal = entry.LastTotal return nil } -// posSetIndex callback updates posBalance item index in posBalanceQueue -func posSetIndex(a interface{}, index int) { - a.(*posBalance).queueIndex = index -} - // negBalance represents a negative balance entry of a disconnected client -type negBalance struct { - address string - logValue int64 - queueIndex int // position in negBalanceQueue -} +type negBalance struct{ logValue int64 } // EncodeRLP implements rlp.Encoder func (e *negBalance) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, []interface{}{e.address, uint64(e.logValue)}) + return rlp.Encode(w, []interface{}{uint64(e.logValue)}) } // DecodeRLP implements rlp.Decoder func (e *negBalance) DecodeRLP(s *rlp.Stream) error { var entry struct { - Address string LogValue uint64 } if err := s.Decode(&entry); err != nil { return err } - e.address = entry.Address e.logValue = int64(entry.LogValue) - e.queueIndex = -1 return nil } -// negSetIndex callback updates negBalance item index in negBalanceQueue -func negSetIndex(a interface{}, index int) { - a.(*negBalance).queueIndex = index +const ( + // nodeDBVersion is the version identifier of the node data in db + nodeDBVersion = 0 + + // dbCleanupCycle is the cycle of db for useless data cleanup + dbCleanupCycle = time.Hour +) + +var ( + positiveBalancePrefix = []byte("pb:") // dbVersion(uint16 big endian) + positiveBalancePrefix + id -> balance + negativeBalancePrefix = []byte("nb:") // dbVersion(uint16 big endian) + negativeBalancePrefix + ip -> balance + cumulativeRunningTimeKey = []byte("cumulativeTime:") // dbVersion(uint16 big endian) + 
cumulativeRunningTimeKey -> cumulativeTime +) + +type nodeDB struct { + db ethdb.Database + pcache *lru.Cache + ncache *lru.Cache + auxbuf []byte // 37-byte auxiliary buffer for key encoding + verbuf [2]byte // 2-byte auxiliary buffer for db version + nbEvictCallBack func(mclock.AbsTime, negBalance) bool // Callback to determine whether the negative balance can be evicted. + clock mclock.Clock + closeCh chan struct{} + cleanupHook func() // Test hook used for testing +} + +func newNodeDB(db ethdb.Database, clock mclock.Clock) *nodeDB { + pcache, _ := lru.New(posBalanceCacheLimit) + ncache, _ := lru.New(negBalanceCacheLimit) + ndb := &nodeDB{ + db: db, + pcache: pcache, + ncache: ncache, + auxbuf: make([]byte, 37), + clock: clock, + closeCh: make(chan struct{}), + } + binary.BigEndian.PutUint16(ndb.verbuf[:], uint16(nodeDBVersion)) + go ndb.expirer() + return ndb +} + +func (db *nodeDB) close() { + close(db.closeCh) +} + +func (db *nodeDB) key(id []byte, neg bool) []byte { + prefix := positiveBalancePrefix + if neg { + prefix = negativeBalancePrefix + } + if len(prefix)+len(db.verbuf)+len(id) > len(db.auxbuf) { + db.auxbuf = append(db.auxbuf, make([]byte, len(prefix)+len(db.verbuf)+len(id)-len(db.auxbuf))...) + } + copy(db.auxbuf[:len(db.verbuf)], db.verbuf[:]) + copy(db.auxbuf[len(db.verbuf):len(db.verbuf)+len(prefix)], prefix) + copy(db.auxbuf[len(prefix)+len(db.verbuf):len(prefix)+len(db.verbuf)+len(id)], id) + return db.auxbuf[:len(prefix)+len(db.verbuf)+len(id)] +} + +func (db *nodeDB) getCumulativeTime() int64 { + blob, err := db.db.Get(append(cumulativeRunningTimeKey, db.verbuf[:]...)) + if err != nil || len(blob) == 0 { + return 0 + } + return int64(binary.BigEndian.Uint64(blob)) +} + +func (db *nodeDB) setCumulativeTime(v int64) { + binary.BigEndian.PutUint64(db.auxbuf[:8], uint64(v)) + db.db.Put(append(cumulativeRunningTimeKey, db.verbuf[:]...), db.auxbuf[:8]) +} + +func (db *nodeDB) getOrNewPB(id enode.ID) posBalance { + key := db.key(id.Bytes(), false) + item, exist := db.pcache.Get(string(key)) + if exist { + return item.(posBalance) + } + var balance posBalance + if enc, err := db.db.Get(key); err == nil { + if err := rlp.DecodeBytes(enc, &balance); err != nil { + log.Error("Failed to decode positive balance", "err", err) + } + } + db.pcache.Add(string(key), balance) + return balance +} + +func (db *nodeDB) setPB(id enode.ID, b posBalance) { + key := db.key(id.Bytes(), false) + enc, err := rlp.EncodeToBytes(&(b)) + if err != nil { + log.Error("Failed to encode positive balance", "err", err) + return + } + db.db.Put(key, enc) + db.pcache.Add(string(key), b) +} + +func (db *nodeDB) delPB(id enode.ID) { + key := db.key(id.Bytes(), false) + db.db.Delete(key) + db.pcache.Remove(string(key)) +} + +func (db *nodeDB) getOrNewNB(id string) negBalance { + key := db.key([]byte(id), true) + item, exist := db.ncache.Get(string(key)) + if exist { + return item.(negBalance) + } + var balance negBalance + if enc, err := db.db.Get(key); err == nil { + if err := rlp.DecodeBytes(enc, &balance); err != nil { + log.Error("Failed to decode negative balance", "err", err) + } + } + db.ncache.Add(string(key), balance) + return balance +} + +func (db *nodeDB) setNB(id string, b negBalance) { + key := db.key([]byte(id), true) + enc, err := rlp.EncodeToBytes(&(b)) + if err != nil { + log.Error("Failed to encode negative balance", "err", err) + return + } + db.db.Put(key, enc) + db.ncache.Add(string(key), b) +} + +func (db *nodeDB) delNB(id string) { + key := db.key([]byte(id), true) + db.db.Delete(key) + 
db.ncache.Remove(string(key)) +} + +func (db *nodeDB) expirer() { + for { + select { + case <-db.clock.After(dbCleanupCycle): + db.expireNodes() + case <-db.closeCh: + return + } + } +} + +// expireNodes iterates the whole node db and checks whether the negative balance +// entry can deleted. +// +// The rationale behind this is: server doesn't need to keep the negative balance +// records if they are low enough. +func (db *nodeDB) expireNodes() { + var ( + visited int + deleted int + start = time.Now() + ) + iter := db.db.NewIteratorWithPrefix(append(db.verbuf[:], negativeBalancePrefix...)) + for iter.Next() { + visited += 1 + var balance negBalance + if err := rlp.DecodeBytes(iter.Value(), &balance); err != nil { + log.Error("Failed to decode negative balance", "err", err) + continue + } + if db.nbEvictCallBack != nil && db.nbEvictCallBack(db.clock.Now(), balance) { + deleted += 1 + db.db.Delete(iter.Key()) + } + } + // Invoke testing hook if it's not nil. + if db.cleanupHook != nil { + db.cleanupHook() + } + log.Debug("Expire nodes", "visited", visited, "deleted", deleted, "elapsed", common.PrettyDuration(time.Since(start))) } diff --git a/les/clientpool_test.go b/les/clientpool_test.go index 6c18c908f..ccda37d12 100644 --- a/les/clientpool_test.go +++ b/les/clientpool_test.go @@ -17,8 +17,11 @@ package les import ( + "bytes" "fmt" + "math" "math/rand" + "reflect" "testing" "time" @@ -51,7 +54,7 @@ func TestClientPoolL100C300P20(t *testing.T) { testClientPool(t, 100, 300, 20, false) } -const testClientPoolTicks = 500000 +const testClientPoolTicks = 100000 type poolTestPeer int @@ -65,6 +68,14 @@ func (i poolTestPeer) freeClientId() string { func (i poolTestPeer) updateCapacity(uint64) {} +type poolTestPeerWithCap struct { + poolTestPeer + + cap uint64 +} + +func (i *poolTestPeerWithCap) updateCapacity(cap uint64) { i.cap = cap } + func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomDisconnect bool) { rand.Seed(time.Now().UnixNano()) var ( @@ -76,8 +87,9 @@ func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomD disconnFn = func(id enode.ID) { disconnCh <- int(id[0]) + int(id[1])<<8 } - pool = newClientPool(db, 1, 10000, &clock, disconnFn) + pool = newClientPool(db, 1, &clock, disconnFn) ) + pool.disableBias = true pool.setLimits(connLimit, uint64(connLimit)) pool.setPriceFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1}) @@ -89,16 +101,9 @@ func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomD t.Fatalf("Test peer #%d rejected", i) } } - // since all accepted peers are new and should not be kicked out, the next one should be rejected - if pool.connect(poolTestPeer(connLimit), 0) { - connected[connLimit] = true - t.Fatalf("Peer accepted over connected limit") - } - // randomly connect and disconnect peers, expect to have a similar total connection time at the end for tickCounter := 0; tickCounter < testClientPoolTicks; tickCounter++ { clock.Run(1 * time.Second) - //time.Sleep(time.Microsecond * 100) if tickCounter == testClientPoolTicks/4 { // give a positive balance to some of the peers @@ -137,11 +142,11 @@ func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomD } expTicks := testClientPoolTicks/2*connLimit/clientCount + testClientPoolTicks/2*(connLimit-paidCount)/(clientCount-paidCount) - expMin := expTicks - expTicks/10 - expMax := expTicks + expTicks/10 + expMin := expTicks - expTicks/5 + expMax := expTicks + expTicks/5 paidTicks := testClientPoolTicks/2*connLimit/clientCount + 
testClientPoolTicks/2 - paidMin := paidTicks - paidTicks/10 - paidMax := paidTicks + paidTicks/10 + paidMin := paidTicks - paidTicks/5 + paidMax := paidTicks + paidTicks/5 // check if the total connected time of peers are all in the expected range for i, c := range connected { @@ -157,24 +162,380 @@ func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomD t.Errorf("Total connected time of test node #%d (%d) outside expected range (%d to %d)", i, connTicks[i], min, max) } } + pool.stop() +} - // a previously unknown peer should be accepted now - if !pool.connect(poolTestPeer(54321), 0) { - t.Fatalf("Previously unknown peer rejected") +func TestConnectPaidClient(t *testing.T) { + var ( + clock mclock.Simulated + db = rawdb.NewMemoryDatabase() + ) + pool := newClientPool(db, 1, &clock, nil) + defer pool.stop() + pool.setLimits(10, uint64(10)) + pool.setPriceFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1}) + + // Add balance for an external client and mark it as paid client + pool.addBalance(poolTestPeer(0).ID(), 1000, false) + + if !pool.connect(poolTestPeer(0), 10) { + t.Fatalf("Failed to connect paid client") } +} - // close and restart pool - pool.stop() - pool = newClientPool(db, 1, 10000, &clock, func(id enode.ID) {}) - pool.setLimits(connLimit, uint64(connLimit)) +func TestConnectPaidClientToSmallPool(t *testing.T) { + var ( + clock mclock.Simulated + db = rawdb.NewMemoryDatabase() + ) + pool := newClientPool(db, 1, &clock, nil) + defer pool.stop() + pool.setLimits(10, uint64(10)) // Total capacity limit is 10 + pool.setPriceFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1}) + + // Add balance for an external client and mark it as paid client + pool.addBalance(poolTestPeer(0).ID(), 1000, false) + + // Connect a fat paid client to pool, should reject it. 
+ if pool.connect(poolTestPeer(0), 100) { + t.Fatalf("Connected fat paid client, should reject it") + } +} + +func TestConnectPaidClientToFullPool(t *testing.T) { + var ( + clock mclock.Simulated + db = rawdb.NewMemoryDatabase() + ) + removeFn := func(enode.ID) {} // Noop + pool := newClientPool(db, 1, &clock, removeFn) + defer pool.stop() + pool.setLimits(10, uint64(10)) // Total capacity limit is 10 + pool.setPriceFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1}) - for i := 0; i < clientCount; i++ { - pool.connect(poolTestPeer(i), 0) + for i := 0; i < 10; i++ { + pool.addBalance(poolTestPeer(i).ID(), 1000000000, false) + pool.connect(poolTestPeer(i), 1) } - // expect pool to remember known nodes and kick out one of them to accept a new one - if !pool.connect(poolTestPeer(54322), 0) { - t.Errorf("Previously unknown peer rejected after restarting pool") + pool.addBalance(poolTestPeer(11).ID(), 1000, false) // Add low balance to new paid client + if pool.connect(poolTestPeer(11), 1) { + t.Fatalf("Low balance paid client should be rejected") + } + clock.Run(time.Second) + pool.addBalance(poolTestPeer(12).ID(), 1000000000*60*3, false) // Add high balance to new paid client + if !pool.connect(poolTestPeer(12), 1) { + t.Fatalf("High balance paid client should be accepted") + } +} + +func TestPaidClientKickedOut(t *testing.T) { + var ( + clock mclock.Simulated + db = rawdb.NewMemoryDatabase() + kickedCh = make(chan int, 1) + ) + removeFn := func(id enode.ID) { kickedCh <- int(id[0]) } + pool := newClientPool(db, 1, &clock, removeFn) + defer pool.stop() + pool.setLimits(10, uint64(10)) // Total capacity limit is 10 + pool.setPriceFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1}) + + for i := 0; i < 10; i++ { + pool.addBalance(poolTestPeer(i).ID(), 1000000000, false) // 1 second allowance + pool.connect(poolTestPeer(i), 1) + clock.Run(time.Millisecond) + } + clock.Run(time.Second) + clock.Run(connectedBias) + if !pool.connect(poolTestPeer(11), 0) { + t.Fatalf("Free client should be accepted") + } + select { + case id := <-kickedCh: + if id != 0 { + t.Fatalf("Kicked client mismatch, want %v, got %v", 0, id) + } + case <-time.NewTimer(time.Second).C: + t.Fatalf("timeout") + } +} + +func TestConnectFreeClient(t *testing.T) { + var ( + clock mclock.Simulated + db = rawdb.NewMemoryDatabase() + ) + pool := newClientPool(db, 1, &clock, nil) + defer pool.stop() + pool.setLimits(10, uint64(10)) + pool.setPriceFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1}) + if !pool.connect(poolTestPeer(0), 10) { + t.Fatalf("Failed to connect free client") + } +} + +func TestConnectFreeClientToFullPool(t *testing.T) { + var ( + clock mclock.Simulated + db = rawdb.NewMemoryDatabase() + ) + removeFn := func(enode.ID) {} // Noop + pool := newClientPool(db, 1, &clock, removeFn) + defer pool.stop() + pool.setLimits(10, uint64(10)) // Total capacity limit is 10 + pool.setPriceFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1}) + + for i := 0; i < 10; i++ { + pool.connect(poolTestPeer(i), 1) + } + if pool.connect(poolTestPeer(11), 1) { + t.Fatalf("New free client should be rejected") + } + clock.Run(time.Minute) + if pool.connect(poolTestPeer(12), 1) { + t.Fatalf("New free client should be rejected") + } + clock.Run(time.Millisecond) + clock.Run(4 * time.Minute) + if !pool.connect(poolTestPeer(13), 1) { + t.Fatalf("Old client connected for more than 5min should be kicked") + } +} + +func TestFreeClientKickedOut(t *testing.T) { + var ( + clock 
mclock.Simulated + db = rawdb.NewMemoryDatabase() + kicked = make(chan int, 10) + ) + removeFn := func(id enode.ID) { kicked <- int(id[0]) } + pool := newClientPool(db, 1, &clock, removeFn) + defer pool.stop() + pool.setLimits(10, uint64(10)) // Total capacity limit is 10 + pool.setPriceFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1}) + + for i := 0; i < 10; i++ { + pool.connect(poolTestPeer(i), 1) + clock.Run(time.Millisecond) + } + if pool.connect(poolTestPeer(10), 1) { + t.Fatalf("New free client should be rejected") + } + clock.Run(5 * time.Minute) + for i := 0; i < 10; i++ { + pool.connect(poolTestPeer(i+10), 1) + } + for i := 0; i < 10; i++ { + select { + case id := <-kicked: + if id >= 10 { + t.Fatalf("Old client should be kicked, now got: %d", id) + } + case <-time.NewTimer(time.Second).C: + t.Fatalf("timeout") + } + } +} + +func TestPositiveBalanceCalculation(t *testing.T) { + var ( + clock mclock.Simulated + db = rawdb.NewMemoryDatabase() + kicked = make(chan int, 10) + ) + removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop + pool := newClientPool(db, 1, &clock, removeFn) + defer pool.stop() + pool.setLimits(10, uint64(10)) // Total capacity limit is 10 + pool.setPriceFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1}) + + pool.addBalance(poolTestPeer(0).ID(), uint64(time.Minute*3), false) + pool.connect(poolTestPeer(0), 10) + clock.Run(time.Minute) + + pool.disconnect(poolTestPeer(0)) + pb := pool.ndb.getOrNewPB(poolTestPeer(0).ID()) + if pb.value != uint64(time.Minute*2) { + t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute*2), pb.value) + } +} + +func TestDowngradePriorityClient(t *testing.T) { + var ( + clock mclock.Simulated + db = rawdb.NewMemoryDatabase() + kicked = make(chan int, 10) + ) + removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop + pool := newClientPool(db, 1, &clock, removeFn) + defer pool.stop() + pool.setLimits(10, uint64(10)) // Total capacity limit is 10 + pool.setPriceFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1}) + + p := &poolTestPeerWithCap{ + poolTestPeer: poolTestPeer(0), + } + pool.addBalance(p.ID(), uint64(time.Minute), false) + pool.connect(p, 10) + if p.cap != 10 { + t.Fatalf("The capacity of priority peer hasn't been updated, got: %d", p.cap) + } + + clock.Run(time.Minute) // All positive balance should be used up. 
+ time.Sleep(300 * time.Millisecond) // Ensure the callback is called + if p.cap != 1 { + t.Fatalf("The capacity of peer should be downgraded, got: %d", p.cap) + } + pb := pool.ndb.getOrNewPB(poolTestPeer(0).ID()) + if pb.value != 0 { + t.Fatalf("Positive balance mismatch, want %v, got %v", 0, pb.value) + } + + pool.addBalance(poolTestPeer(0).ID(), uint64(time.Minute), false) + pb = pool.ndb.getOrNewPB(poolTestPeer(0).ID()) + if pb.value != uint64(time.Minute) { + t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute), pb.value) + } +} + +func TestNegativeBalanceCalculation(t *testing.T) { + var ( + clock mclock.Simulated + db = rawdb.NewMemoryDatabase() + kicked = make(chan int, 10) + ) + removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop + pool := newClientPool(db, 1, &clock, removeFn) + defer pool.stop() + pool.setLimits(10, uint64(10)) // Total capacity limit is 10 + pool.setPriceFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1}) + + for i := 0; i < 10; i++ { + pool.connect(poolTestPeer(i), 1) + } + clock.Run(time.Second) + + for i := 0; i < 10; i++ { + pool.disconnect(poolTestPeer(i)) + nb := pool.ndb.getOrNewNB(poolTestPeer(i).freeClientId()) + if nb.logValue != 0 { + t.Fatalf("Short connection shouldn't be recorded") + } + } + + for i := 0; i < 10; i++ { + pool.connect(poolTestPeer(i), 1) + } + clock.Run(time.Minute) + for i := 0; i < 10; i++ { + pool.disconnect(poolTestPeer(i)) + nb := pool.ndb.getOrNewNB(poolTestPeer(i).freeClientId()) + nb.logValue -= pool.logOffset(clock.Now()) + nb.logValue /= fixedPointMultiplier + if nb.logValue != int64(math.Log(float64(time.Minute/time.Second))) { + t.Fatalf("Negative balance mismatch, want %v, got %v", int64(math.Log(float64(time.Minute/time.Second))), nb.logValue) + } + } +} + +func TestNodeDB(t *testing.T) { + ndb := newNodeDB(rawdb.NewMemoryDatabase(), mclock.System{}) + defer ndb.close() + + if !bytes.Equal(ndb.verbuf[:], []byte{0x00, 0x00}) { + t.Fatalf("version buffer mismatch, want %v, got %v", []byte{0x00, 0x00}, ndb.verbuf) + } + var cases = []struct { + id enode.ID + ip string + balance interface{} + positive bool + }{ + {enode.ID{0x00, 0x01, 0x02}, "", posBalance{value: 100, lastTotal: 200}, true}, + {enode.ID{0x00, 0x01, 0x02}, "", posBalance{value: 200, lastTotal: 300}, true}, + {enode.ID{}, "127.0.0.1", negBalance{logValue: 10}, false}, + {enode.ID{}, "127.0.0.1", negBalance{logValue: 20}, false}, + } + for _, c := range cases { + if c.positive { + ndb.setPB(c.id, c.balance.(posBalance)) + if pb := ndb.getOrNewPB(c.id); !reflect.DeepEqual(pb, c.balance.(posBalance)) { + t.Fatalf("Positive balance mismatch, want %v, got %v", c.balance.(posBalance), pb) + } + } else { + ndb.setNB(c.ip, c.balance.(negBalance)) + if nb := ndb.getOrNewNB(c.ip); !reflect.DeepEqual(nb, c.balance.(negBalance)) { + t.Fatalf("Negative balance mismatch, want %v, got %v", c.balance.(negBalance), nb) + } + } + } + for _, c := range cases { + if c.positive { + ndb.delPB(c.id) + if pb := ndb.getOrNewPB(c.id); !reflect.DeepEqual(pb, posBalance{}) { + t.Fatalf("Positive balance mismatch, want %v, got %v", posBalance{}, pb) + } + } else { + ndb.delNB(c.ip) + if nb := ndb.getOrNewNB(c.ip); !reflect.DeepEqual(nb, negBalance{}) { + t.Fatalf("Negative balance mismatch, want %v, got %v", negBalance{}, nb) + } + } + } + ndb.setCumulativeTime(100) + if ndb.getCumulativeTime() != 100 { + t.Fatalf("Cumulative time mismatch, want %v, got %v", 100, ndb.getCumulativeTime()) + } +} + +func TestNodeDBExpiration(t *testing.T) { + var ( + 
iterated int + done = make(chan struct{}, 1) + ) + callback := func(now mclock.AbsTime, b negBalance) bool { + iterated += 1 + return true + } + clock := &mclock.Simulated{} + ndb := newNodeDB(rawdb.NewMemoryDatabase(), clock) + defer ndb.close() + ndb.nbEvictCallBack = callback + ndb.cleanupHook = func() { done <- struct{}{} } + + var cases = []struct { + ip string + balance negBalance + }{ + {"127.0.0.1", negBalance{logValue: 1}}, + {"127.0.0.2", negBalance{logValue: 1}}, + {"127.0.0.3", negBalance{logValue: 1}}, + {"127.0.0.4", negBalance{logValue: 1}}, + } + for _, c := range cases { + ndb.setNB(c.ip, c.balance) + } + time.Sleep(100 * time.Millisecond) // Ensure the db expirer is registered. + clock.Run(time.Hour + time.Minute) + select { + case <-done: + case <-time.NewTimer(time.Second).C: + t.Fatalf("timeout") + } + if iterated != 4 { + t.Fatalf("Failed to evict useless negative balances, want %v, got %d", 4, iterated) + } + + for _, c := range cases { + ndb.setNB(c.ip, c.balance) + } + clock.Run(time.Hour + time.Minute) + select { + case <-done: + case <-time.NewTimer(time.Second).C: + t.Fatalf("timeout") + } + if iterated != 8 { + t.Fatalf("Failed to evict useless negative balances, want %v, got %d", 8, iterated) } - pool.stop() } diff --git a/les/distributor.go b/les/distributor.go index d246d2168..b70eaea96 100644 --- a/les/distributor.go +++ b/les/distributor.go @@ -110,13 +110,15 @@ func (d *requestDistributor) registerTestPeer(p distPeer) { d.peerLock.Unlock() } -// distMaxWait is the maximum waiting time after which further necessary waiting -// times are recalculated based on new feedback from the servers -const distMaxWait = time.Millisecond * 50 +var ( + // distMaxWait is the maximum waiting time after which further necessary waiting + // times are recalculated based on new feedback from the servers + distMaxWait = time.Millisecond * 50 -// waitForPeers is the time window in which a request does not fail even if it -// has no suitable peers to send to at the moment -const waitForPeers = time.Second * 3 + // waitForPeers is the time window in which a request does not fail even if it + // has no suitable peers to send to at the moment + waitForPeers = time.Second * 3 +) // main event loop func (d *requestDistributor) loop() { diff --git a/les/distributor_test.go b/les/distributor_test.go index 81ca5ef87..385941b33 100644 --- a/les/distributor_test.go +++ b/les/distributor_test.go @@ -86,8 +86,8 @@ func (p *testDistPeer) worker(t *testing.T, checkOrder bool, stop chan struct{}) const ( testDistBufLimit = 10000000 testDistMaxCost = 1000000 - testDistPeerCount = 5 - testDistReqCount = 5000 + testDistPeerCount = 2 + testDistReqCount = 10 testDistMaxResendCount = 3 ) @@ -128,6 +128,9 @@ func testRequestDistributor(t *testing.T, resend bool) { go peers[i].worker(t, !resend, stop) dist.registerTestPeer(peers[i]) } + // Disable the mechanism that lets a request wait for a while + // even if there is no suitable peer to send it to right now. + waitForPeers = 0 var wg sync.WaitGroup diff --git a/les/odr_test.go b/les/odr_test.go index d12bf4a22..e45fddc50 100644 --- a/les/odr_test.go +++ b/les/odr_test.go @@ -188,6 +188,15 @@ func testOdr(t *testing.T, protocol int, expFail uint64, checkCached bool, fn od client.handler.synchronise(client.peer.peer) + // Ensure the client has synced all necessary data. 
+ clientHead := client.handler.backend.blockchain.CurrentHeader() + if clientHead.Number.Uint64() != 4 { + t.Fatalf("Failed to sync the chain with server, head: %v", clientHead.Number.Uint64()) + } + // Disable the mechanism that lets a request wait for a while + // even if there is no suitable peer to send it to right now. + waitForPeers = 0 + test := func(expFail uint64) { // Mark this as a helper to put the failures at the correct lines t.Helper() @@ -196,7 +205,9 @@ func testOdr(t *testing.T, protocol int, expFail uint64, checkCached bool, fn od bhash := rawdb.ReadCanonicalHash(server.db, i) b1 := fn(light.NoOdr, server.db, server.handler.server.chainConfig, server.handler.blockchain, nil, bhash) - ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) + // Set the timeout to 1 second here to ensure there is enough time + // for Travis to complete the request. + ctx, cancel := context.WithTimeout(context.Background(), time.Second) b2 := fn(ctx, client.db, client.handler.backend.chainConfig, nil, client.handler.backend.blockchain, bhash) cancel() diff --git a/les/request_test.go b/les/request_test.go index 14076669d..cf192bfdc 100644 --- a/les/request_test.go +++ b/les/request_test.go @@ -81,8 +81,15 @@ func testAccess(t *testing.T, protocol int, fn accessTestFn) { // Assemble the test environment server, client, tearDown := newClientServerEnv(t, 4, protocol, nil, nil, 0, false, true) defer tearDown() + client.handler.synchronise(client.peer.peer) + // Ensure the client has synced all necessary data. + clientHead := client.handler.backend.blockchain.CurrentHeader() + if clientHead.Number.Uint64() != 4 { + t.Fatalf("Failed to sync the chain with server, head: %v", clientHead.Number.Uint64()) + } + test := func(expFail uint64) { for i := uint64(0); i <= server.handler.blockchain.CurrentHeader().Number.Uint64(); i++ { bhash := rawdb.ReadCanonicalHash(server.db, i) diff --git a/les/server.go b/les/server.go index 96b6f416d..e2f215735 100644 --- a/les/server.go +++ b/les/server.go @@ -113,7 +113,7 @@ func NewLesServer(e *eth.Ethereum, config *eth.Config) (*LesServer, error) { maxCapacity = totalRecharge } srv.fcManager.SetCapacityLimits(srv.freeCapacity, maxCapacity, srv.freeCapacity*2) - srv.clientPool = newClientPool(srv.chainDb, srv.freeCapacity, 10000, mclock.System{}, func(id enode.ID) { go srv.peers.Unregister(peerIdToString(id)) }) + srv.clientPool = newClientPool(srv.chainDb, srv.freeCapacity, mclock.System{}, func(id enode.ID) { go srv.peers.Unregister(peerIdToString(id)) }) srv.clientPool.setPriceFactors(priceFactors{0, 1, 1}, priceFactors{0, 1, 1}) checkpoint := srv.latestLocalCheckpoint() @@ -183,9 +183,9 @@ func (s *LesServer) Stop() { s.peers.Close() s.fcManager.Stop() - s.clientPool.stop() s.costTracker.stop() s.handler.stop() + s.clientPool.stop() // client pool should be closed after handler. s.servingQueue.stop() // Note, bloom trie indexer is closed by parent bloombits indexer. diff --git a/les/sync_test.go b/les/sync_test.go index 020566b6d..4e6f2d436 100644 --- a/les/sync_test.go +++ b/les/sync_test.go @@ -30,17 +30,14 @@ import ( ) // Test light syncing which will download all headers from genesis. -func TestLightSyncingLes2(t *testing.T) { testCheckpointSyncing(t, 2, 0) } func TestLightSyncingLes3(t *testing.T) { testCheckpointSyncing(t, 3, 0) } // Test legacy checkpoint syncing which will download tail headers // based on a hardcoded checkpoint. 
-func TestLegacyCheckpointSyncingLes2(t *testing.T) { testCheckpointSyncing(t, 2, 1) } func TestLegacyCheckpointSyncingLes3(t *testing.T) { testCheckpointSyncing(t, 3, 1) } // Test checkpoint syncing which will download tail headers based // on a verified checkpoint. -func TestCheckpointSyncingLes2(t *testing.T) { testCheckpointSyncing(t, 2, 2) } func TestCheckpointSyncingLes3(t *testing.T) { testCheckpointSyncing(t, 3, 2) } func testCheckpointSyncing(t *testing.T, protocol int, syncMode int) { @@ -92,7 +89,7 @@ func testCheckpointSyncing(t *testing.T, protocol int, syncMode int) { for { _, hash, _, err := server.handler.server.oracle.contract.Contract().GetLatestCheckpoint(nil) if err != nil || hash == [32]byte{} { - time.Sleep(100 * time.Millisecond) + time.Sleep(10 * time.Millisecond) continue } break diff --git a/les/test_helper.go b/les/test_helper.go index c57620518..e46ecee98 100644 --- a/les/test_helper.go +++ b/les/test_helper.go @@ -71,10 +71,10 @@ var ( var ( // The block frequency for creating checkpoint(only used in test) - sectionSize = big.NewInt(512) + sectionSize = big.NewInt(128) // The number of confirmations needed to generate a checkpoint(only used in test). - processConfirms = big.NewInt(4) + processConfirms = big.NewInt(1) // The token bucket buffer limit for testing purpose. testBufLimit = uint64(1000000) @@ -280,7 +280,7 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da } server.costTracker, server.freeCapacity = newCostTracker(db, server.config) server.costTracker.testCostList = testCostList(0) // Disable flow control mechanism. - server.clientPool = newClientPool(db, 1, 10000, clock, nil) + server.clientPool = newClientPool(db, 1, clock, nil) server.clientPool.setLimits(10000, 10000) // Assign enough capacity for clientpool server.handler = newServerHandler(server, simulation.Blockchain(), db, txpool, func() bool { return true }) if server.oracle != nil { @@ -517,7 +517,7 @@ func newClientServerEnv(t *testing.T, blocks int, protocol int, callback indexer if connect { cpeer, err1, speer, err2 = newTestPeerPair("peer", protocol, server, client) select { - case <-time.After(time.Millisecond * 100): + case <-time.After(time.Millisecond * 300): case err := <-err1: t.Fatalf("peer 1 handshake error: %v", err) case err := <-err2: diff --git a/light/postprocess.go b/light/postprocess.go index b14d9203d..064efc087 100644 --- a/light/postprocess.go +++ b/light/postprocess.go @@ -79,21 +79,21 @@ var ( } // TestServerIndexerConfig wraps a set of configs as a test indexer config for server side. TestServerIndexerConfig = &IndexerConfig{ - ChtSize: 512, - ChtConfirms: 4, - BloomSize: 64, - BloomConfirms: 4, - BloomTrieSize: 512, - BloomTrieConfirms: 4, + ChtSize: 128, + ChtConfirms: 1, + BloomSize: 16, + BloomConfirms: 1, + BloomTrieSize: 128, + BloomTrieConfirms: 1, } // TestClientIndexerConfig wraps a set of configs as a test indexer config for client side. 
TestClientIndexerConfig = &IndexerConfig{ - ChtSize: 512, - ChtConfirms: 32, - BloomSize: 512, - BloomConfirms: 32, - BloomTrieSize: 512, - BloomTrieConfirms: 32, + ChtSize: 128, + ChtConfirms: 8, + BloomSize: 128, + BloomConfirms: 8, + BloomTrieSize: 128, + BloomTrieConfirms: 8, } ) diff --git a/log/README.md b/log/README.md index b4476577b..47426806d 100644 --- a/log/README.md +++ b/log/README.md @@ -1,8 +1,8 @@ -![obligatory xkcd](http://imgs.xkcd.com/comics/standards.png) +![obligatory xkcd](https://imgs.xkcd.com/comics/standards.png) # log15 [![godoc reference](https://godoc.org/github.com/inconshreveable/log15?status.png)](https://godoc.org/github.com/inconshreveable/log15) [![Build Status](https://travis-ci.org/inconshreveable/log15.svg?branch=master)](https://travis-ci.org/inconshreveable/log15) -Package log15 provides an opinionated, simple toolkit for best-practice logging in Go (golang) that is both human and machine readable. It is modeled after the Go standard library's [`io`](http://golang.org/pkg/io/) and [`net/http`](http://golang.org/pkg/net/http/) packages and is an alternative to the standard library's [`log`](http://golang.org/pkg/log/) package. +Package log15 provides an opinionated, simple toolkit for best-practice logging in Go (golang) that is both human and machine readable. It is modeled after the Go standard library's [`io`](https://golang.org/pkg/io/) and [`net/http`](https://golang.org/pkg/net/http/) packages and is an alternative to the standard library's [`log`](https://golang.org/pkg/log/) package. ## Features - A simple, easy-to-understand API diff --git a/metrics/README.md b/metrics/README.md index bc2a45a83..e2d794500 100644 --- a/metrics/README.md +++ b/metrics/README.md @@ -5,7 +5,7 @@ go-metrics Go port of Coda Hale's Metrics library: . -Documentation: . +Documentation: . Usage ----- @@ -128,7 +128,7 @@ go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com") Maintain all metrics along with expvars at `/debug/metrics`: -This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/) +This uses the same mechanism as [the official expvar](https://golang.org/pkg/expvar/) but exposed under `/debug/metrics`, which shows a json representation of all your usual expvars as well as all your go-metrics. 
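(Hedged aside, not part of the patch: the `/debug/metrics` endpoint described in the metrics README hunk above is normally wired up through the metrics `exp` helper. The sketch below assumes the vendored `metrics/exp` package exposes `Exp(registry)` and that metric collection is gated by the `metrics.Enabled` flag; both are assumptions about this fork's vendored copy rather than something stated in the patch.)

```go
// Sketch: expose all registered metrics (plus the standard expvars) under
// /debug/metrics, as described in the README above.
package main

import (
	"net/http"

	"github.com/ethersocial/go-ethersocial/metrics"
	"github.com/ethersocial/go-ethersocial/metrics/exp"
)

func main() {
	metrics.Enabled = true // assumption: metric constructors are no-ops unless enabled

	// Register a sample counter so the endpoint has something to report.
	requests := metrics.NewRegisteredCounter("example/requests", metrics.DefaultRegistry)
	requests.Inc(1)

	// Mount the JSON view of DefaultRegistry on the default HTTP mux.
	exp.Exp(metrics.DefaultRegistry)
	http.ListenAndServe("127.0.0.1:6060", nil)
}
```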
diff --git a/miner/worker_test.go b/miner/worker_test.go index d0c544395..a2731cd33 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -18,9 +18,11 @@ package miner import ( "math/big" + "math/rand" "testing" "time" + "github.com/ethersocial/go-ethersocial/accounts" "github.com/ethersocial/go-ethersocial/common" "github.com/ethersocial/go-ethersocial/consensus" "github.com/ethersocial/go-ethersocial/consensus/clique" @@ -35,6 +37,15 @@ import ( "github.com/ethersocial/go-ethersocial/params" ) +const ( + // testCode is the testing contract binary code which will initialises some + // variables in constructor + testCode = "0x60806040527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0060005534801561003457600080fd5b5060fc806100436000396000f3fe6080604052348015600f57600080fd5b506004361060325760003560e01c80630c4dae8814603757806398a213cf146053575b600080fd5b603d607e565b6040518082815260200191505060405180910390f35b607c60048036036020811015606757600080fd5b81019080803590602001909291905050506084565b005b60005481565b806000819055507fe9e44f9f7da8c559de847a3232b57364adc0354f15a2cd8dc636d54396f9587a6000546040518082815260200191505060405180910390a15056fea265627a7a723058208ae31d9424f2d0bc2a3da1a5dd659db2d71ec322a17db8f87e19e209e3a1ff4a64736f6c634300050a0032" + + // testGas is the gas required for contract deployment. + testGas = 144109 +) + var ( // Test chain configurations testTxPoolConfig core.TxPoolConfig @@ -73,6 +84,7 @@ func init() { pendingTxs = append(pendingTxs, tx1) tx2, _ := types.SignTx(types.NewTransaction(1, testUserAddress, big.NewInt(1000), params.TxGas, nil, nil), types.HomesteadSigner{}, testBankKey) newTxs = append(newTxs, tx2) + rand.Seed(time.Now().UnixNano()) } // testWorkerBackend implements worker.Backend interfaces and wraps all information needed during the testing. 
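(Hedged illustration, not part of the patch: the `testCode`/`testGas` constants added above are consumed by the worker tests when they create contract-deployment transactions. The sketch below shows one way such a transaction could be assembled inside the `miner` test package; the helper name and the throwaway key are placeholders, and only `testCode`, `testGas`, and the core/types/crypto calls come from the surrounding code.)

```go
// Sketch: build and sign a contract-creation transaction from the constants
// above, roughly mirroring what the worker test fixture does.
package miner

import (
	"math/big"

	"github.com/ethersocial/go-ethersocial/common"
	"github.com/ethersocial/go-ethersocial/core/types"
	"github.com/ethersocial/go-ethersocial/crypto"
)

func exampleDeployTx(nonce uint64) (*types.Transaction, error) {
	key, err := crypto.GenerateKey() // throwaway key; the real tests sign with testBankKey
	if err != nil {
		return nil, err
	}
	tx := types.NewContractCreation(
		nonce,
		big.NewInt(0),            // no value transferred
		uint64(testGas),          // gas needed to run the constructor
		nil,                      // gas price is irrelevant for the in-memory pool used here
		common.FromHex(testCode), // deployment bytecode
	)
	return types.SignTx(tx, types.HomesteadSigner{}, key)
}
```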
@@ -81,29 +93,30 @@ type testWorkerBackend struct { txPool *core.TxPool chain *core.BlockChain testTxFeed event.Feed + genesis *core.Genesis uncleBlock *types.Block } -func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, n int) *testWorkerBackend { - var ( - db = rawdb.NewMemoryDatabase() - gspec = core.Genesis{ - Config: chainConfig, - Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}}, - } - ) +func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, n int) *testWorkerBackend { + var gspec = core.Genesis{ + Config: chainConfig, + Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}}, + } - switch engine.(type) { + switch e := engine.(type) { case *clique.Clique: gspec.ExtraData = make([]byte, 32+common.AddressLength+crypto.SignatureLength) - copy(gspec.ExtraData[32:], testBankAddress[:]) + copy(gspec.ExtraData[32:32+common.AddressLength], testBankAddress.Bytes()) + e.Authorize(testBankAddress, func(account accounts.Account, s string, data []byte) ([]byte, error) { + return crypto.Sign(crypto.Keccak256(data), testBankKey) + }) case *ethash.Ethash: default: t.Fatalf("unexpected consensus engine type: %T", engine) } genesis := gspec.MustCommit(db) - chain, _ := core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil) + chain, _ := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec.Config, engine, vm.Config{}, nil) txpool := core.NewTxPool(testTxPoolConfig, chainConfig, chain) // Generate a small n-block chain and an uncle block for it @@ -127,6 +140,7 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine db: db, chain: chain, txPool: txpool, + genesis: &gspec, uncleBlock: blocks[0], } } @@ -137,14 +151,124 @@ func (b *testWorkerBackend) PostChainEvents(events []interface{}) { b.chain.PostChainEvents(events, nil) } -func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, blocks int) (*worker, *testWorkerBackend) { - backend := newTestWorkerBackend(t, chainConfig, engine, blocks) +func (b *testWorkerBackend) newRandomUncle() *types.Block { + var parent *types.Block + cur := b.chain.CurrentBlock() + if cur.NumberU64() == 0 { + parent = b.chain.Genesis() + } else { + parent = b.chain.GetBlockByHash(b.chain.CurrentBlock().ParentHash()) + } + blocks, _ := core.GenerateChain(b.chain.Config(), parent, b.chain.Engine(), b.db, 1, func(i int, gen *core.BlockGen) { + var addr = make([]byte, common.AddressLength) + rand.Read(addr) + gen.SetCoinbase(common.BytesToAddress(addr)) + }) + return blocks[0] +} + +func (b *testWorkerBackend) newRandomTx(creation bool) *types.Transaction { + var tx *types.Transaction + if creation { + tx, _ = types.SignTx(types.NewContractCreation(b.txPool.Nonce(testBankAddress), big.NewInt(0), testGas, nil, common.FromHex(testCode)), types.HomesteadSigner{}, testBankKey) + } else { + tx, _ = types.SignTx(types.NewTransaction(b.txPool.Nonce(testBankAddress), testUserAddress, big.NewInt(1000), params.TxGas, nil, nil), types.HomesteadSigner{}, testBankKey) + } + return tx +} + +func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int) (*worker, *testWorkerBackend) { + backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks) backend.txPool.AddLocals(pendingTxs) w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil) w.setEtherbase(testBankAddress) return 
w, backend } +func TestGenerateBlockAndImportEthash(t *testing.T) { + testGenerateBlockAndImport(t, false) +} + +func TestGenerateBlockAndImportClique(t *testing.T) { + testGenerateBlockAndImport(t, true) +} + +func testGenerateBlockAndImport(t *testing.T, isClique bool) { + var ( + engine consensus.Engine + chainConfig *params.ChainConfig + db = rawdb.NewMemoryDatabase() + ) + if isClique { + chainConfig = params.AllCliqueProtocolChanges + chainConfig.Clique = ¶ms.CliqueConfig{Period: 1, Epoch: 30000} + engine = clique.New(chainConfig.Clique, db) + } else { + chainConfig = params.AllEthashProtocolChanges + engine = ethash.NewFaker() + } + + w, b := newTestWorker(t, chainConfig, engine, db, 0) + defer w.close() + + db2 := rawdb.NewMemoryDatabase() + b.genesis.MustCommit(db2) + chain, _ := core.NewBlockChain(db2, nil, b.chain.Config(), engine, vm.Config{}, nil) + defer chain.Stop() + + newBlock := make(chan struct{}) + listenNewBlock := func() { + sub := w.mux.Subscribe(core.NewMinedBlockEvent{}) + defer sub.Unsubscribe() + + for item := range sub.Chan() { + block := item.Data.(core.NewMinedBlockEvent).Block + _, err := chain.InsertChain([]*types.Block{block}) + if err != nil { + t.Fatalf("Failed to insert new mined block:%d, error:%v", block.NumberU64(), err) + } + newBlock <- struct{}{} + } + } + + // Ensure worker has finished initialization + for { + b := w.pendingBlock() + if b != nil && b.NumberU64() == 1 { + break + } + } + w.start() // Start mining! + + // Ignore first 2 commits caused by start operation + ignored := make(chan struct{}, 2) + w.skipSealHook = func(task *task) bool { + ignored <- struct{}{} + return true + } + for i := 0; i < 2; i++ { + <-ignored + } + + go listenNewBlock() + + // Ignore empty commit here for less noise + w.skipSealHook = func(task *task) bool { + return len(task.receipts) == 0 + } + for i := 0; i < 5; i++ { + b.txPool.AddLocal(b.newRandomTx(true)) + b.txPool.AddLocal(b.newRandomTx(false)) + b.PostChainEvents([]interface{}{core.ChainSideEvent{Block: b.newRandomUncle()}}) + b.PostChainEvents([]interface{}{core.ChainSideEvent{Block: b.newRandomUncle()}}) + select { + case <-newBlock: + case <-time.NewTimer(3 * time.Second).C: // Worker needs 1s to include new changes. + t.Fatalf("timeout") + } + } +} + func TestPendingStateAndBlockEthash(t *testing.T) { testPendingStateAndBlock(t, ethashChainConfig, ethash.NewFaker()) } @@ -155,7 +279,7 @@ func TestPendingStateAndBlockClique(t *testing.T) { func testPendingStateAndBlock(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) { defer engine.Close() - w, b := newTestWorker(t, chainConfig, engine, 0) + w, b := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0) defer w.close() // Ensure snapshot has been updated. 
@@ -187,7 +311,7 @@ func TestEmptyWorkClique(t *testing.T) { func testEmptyWork(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) { defer engine.Close() - w, _ := newTestWorker(t, chainConfig, engine, 0) + w, _ := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0) defer w.close() var ( @@ -241,7 +365,7 @@ func TestStreamUncleBlock(t *testing.T) { ethash := ethash.NewFaker() defer ethash.Close() - w, b := newTestWorker(t, ethashChainConfig, ethash, 1) + w, b := newTestWorker(t, ethashChainConfig, ethash, rawdb.NewMemoryDatabase(), 1) defer w.close() var taskCh = make(chan struct{}) @@ -304,7 +428,7 @@ func TestRegenerateMiningBlockClique(t *testing.T) { func testRegenerateMiningBlock(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) { defer engine.Close() - w, b := newTestWorker(t, chainConfig, engine, 0) + w, b := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0) defer w.close() var taskCh = make(chan struct{}) @@ -369,7 +493,7 @@ func TestAdjustIntervalClique(t *testing.T) { func testAdjustInterval(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) { defer engine.Close() - w, _ := newTestWorker(t, chainConfig, engine, 0) + w, _ := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0) defer w.close() w.skipSealHook = func(task *task) bool { diff --git a/p2p/dial.go b/p2p/dial.go index 84af21751..8f845ce04 100644 --- a/p2p/dial.go +++ b/p2p/dial.go @@ -33,12 +33,7 @@ const ( // private networks. dialHistoryExpiration = inboundThrottleTime + 5*time.Second - // Discovery lookups are throttled and can only run - // once every few seconds. - lookupInterval = 4 * time.Second - - // If no peers are found for this amount of time, the initial bootnodes are - // attempted to be connected. + // If no peers are found for this amount of time, the initial bootnodes are dialed. fallbackInterval = 20 * time.Second // Endpoint resolution is throttled with bounded backoff. @@ -52,6 +47,10 @@ type NodeDialer interface { Dial(*enode.Node) (net.Conn, error) } +type nodeResolver interface { + Resolve(*enode.Node) *enode.Node +} + // TCPDialer implements the NodeDialer interface by using a net.Dialer to // create TCP connections to nodes in the network type TCPDialer struct { @@ -69,7 +68,6 @@ func (t TCPDialer) Dial(dest *enode.Node) (net.Conn, error) { // of the main loop in Server.run. type dialstate struct { maxDynDials int - ntab discoverTable netrestrict *netutil.Netlist self enode.ID bootnodes []*enode.Node // default dials when there are no peers @@ -79,55 +77,23 @@ type dialstate struct { lookupRunning bool dialing map[enode.ID]connFlag lookupBuf []*enode.Node // current discovery lookup results - randomNodes []*enode.Node // filled from Table static map[enode.ID]*dialTask hist expHeap } -type discoverTable interface { - Close() - Resolve(*enode.Node) *enode.Node - LookupRandom() []*enode.Node - ReadRandomNodes([]*enode.Node) int -} - type task interface { Do(*Server) } -// A dialTask is generated for each node that is dialed. Its -// fields cannot be accessed while the task is running. -type dialTask struct { - flags connFlag - dest *enode.Node - lastResolved time.Time - resolveDelay time.Duration -} - -// discoverTask runs discovery table operations. -// Only one discoverTask is active at any time. -// discoverTask.Do performs a random lookup. 
-type discoverTask struct { - results []*enode.Node -} - -// A waitExpireTask is generated if there are no other tasks -// to keep the loop in Server.run ticking. -type waitExpireTask struct { - time.Duration -} - -func newDialState(self enode.ID, ntab discoverTable, maxdyn int, cfg *Config) *dialstate { +func newDialState(self enode.ID, maxdyn int, cfg *Config) *dialstate { s := &dialstate{ maxDynDials: maxdyn, - ntab: ntab, self: self, netrestrict: cfg.NetRestrict, log: cfg.Logger, static: make(map[enode.ID]*dialTask), dialing: make(map[enode.ID]connFlag), bootnodes: make([]*enode.Node, len(cfg.BootstrapNodes)), - randomNodes: make([]*enode.Node, maxdyn/2), } copy(s.bootnodes, cfg.BootstrapNodes) if s.log == nil { @@ -151,10 +117,6 @@ func (s *dialstate) removeStatic(n *enode.Node) { } func (s *dialstate) newTasks(nRunning int, peers map[enode.ID]*Peer, now time.Time) []task { - if s.start.IsZero() { - s.start = now - } - var newtasks []task addDial := func(flag connFlag, n *enode.Node) bool { if err := s.checkDial(n, peers); err != nil { @@ -166,20 +128,9 @@ func (s *dialstate) newTasks(nRunning int, peers map[enode.ID]*Peer, now time.Ti return true } - // Compute number of dynamic dials necessary at this point. - needDynDials := s.maxDynDials - for _, p := range peers { - if p.rw.is(dynDialedConn) { - needDynDials-- - } - } - for _, flag := range s.dialing { - if flag&dynDialedConn != 0 { - needDynDials-- - } + if s.start.IsZero() { + s.start = now } - - // Expire the dial history on every invocation. s.hist.expire(now) // Create dials for static nodes if they are not connected. @@ -194,6 +145,20 @@ func (s *dialstate) newTasks(nRunning int, peers map[enode.ID]*Peer, now time.Ti newtasks = append(newtasks, t) } } + + // Compute number of dynamic dials needed. + needDynDials := s.maxDynDials + for _, p := range peers { + if p.rw.is(dynDialedConn) { + needDynDials-- + } + } + for _, flag := range s.dialing { + if flag&dynDialedConn != 0 { + needDynDials-- + } + } + // If we don't have any peers whatsoever, try to dial a random bootnode. This // scenario is useful for the testnet (and private networks) where the discovery // table might be full of mostly bad peers, making it hard to find good ones. @@ -201,24 +166,12 @@ func (s *dialstate) newTasks(nRunning int, peers map[enode.ID]*Peer, now time.Ti bootnode := s.bootnodes[0] s.bootnodes = append(s.bootnodes[:0], s.bootnodes[1:]...) s.bootnodes = append(s.bootnodes, bootnode) - if addDial(dynDialedConn, bootnode) { needDynDials-- } } - // Use random nodes from the table for half of the necessary - // dynamic dials. - randomCandidates := needDynDials / 2 - if randomCandidates > 0 { - n := s.ntab.ReadRandomNodes(s.randomNodes) - for i := 0; i < randomCandidates && i < n; i++ { - if addDial(dynDialedConn, s.randomNodes[i]) { - needDynDials-- - } - } - } - // Create dynamic dials from random lookup results, removing tried - // items from the result buffer. + + // Create dynamic dials from discovery results. i := 0 for ; i < len(s.lookupBuf) && needDynDials > 0; i++ { if addDial(dynDialedConn, s.lookupBuf[i]) { @@ -226,10 +179,11 @@ func (s *dialstate) newTasks(nRunning int, peers map[enode.ID]*Peer, now time.Ti } } s.lookupBuf = s.lookupBuf[:copy(s.lookupBuf, s.lookupBuf[i:])] + // Launch a discovery lookup if more candidates are needed. 
if len(s.lookupBuf) < needDynDials && !s.lookupRunning { s.lookupRunning = true - newtasks = append(newtasks, &discoverTask{}) + newtasks = append(newtasks, &discoverTask{want: needDynDials - len(s.lookupBuf)}) } // Launch a timer to wait for the next node to expire if all @@ -279,6 +233,15 @@ func (s *dialstate) taskDone(t task, now time.Time) { } } +// A dialTask is generated for each node that is dialed. Its +// fields cannot be accessed while the task is running. +type dialTask struct { + flags connFlag + dest *enode.Node + lastResolved time.Time + resolveDelay time.Duration +} + func (t *dialTask) Do(srv *Server) { if t.dest.Incomplete() { if !t.resolve(srv) { @@ -304,8 +267,8 @@ func (t *dialTask) Do(srv *Server) { // discovery network with useless queries for nodes that don't exist. // The backoff delay resets when the node is found. func (t *dialTask) resolve(srv *Server) bool { - if srv.ntab == nil { - srv.log.Debug("Can't resolve node", "id", t.dest.ID, "err", "discovery is disabled") + if srv.staticNodeResolver == nil { + srv.log.Debug("Can't resolve node", "id", t.dest.ID(), "err", "discovery is disabled") return false } if t.resolveDelay == 0 { @@ -314,20 +277,20 @@ func (t *dialTask) resolve(srv *Server) bool { if time.Since(t.lastResolved) < t.resolveDelay { return false } - resolved := srv.ntab.Resolve(t.dest) + resolved := srv.staticNodeResolver.Resolve(t.dest) t.lastResolved = time.Now() if resolved == nil { t.resolveDelay *= 2 if t.resolveDelay > maxResolveDelay { t.resolveDelay = maxResolveDelay } - srv.log.Debug("Resolving node failed", "id", t.dest.ID, "newdelay", t.resolveDelay) + srv.log.Debug("Resolving node failed", "id", t.dest.ID(), "newdelay", t.resolveDelay) return false } // The node was found. t.resolveDelay = initialResolveDelay t.dest = resolved - srv.log.Debug("Resolved node", "id", t.dest.ID, "addr", &net.TCPAddr{IP: t.dest.IP(), Port: t.dest.TCP()}) + srv.log.Debug("Resolved node", "id", t.dest.ID(), "addr", &net.TCPAddr{IP: t.dest.IP(), Port: t.dest.TCP()}) return true } @@ -350,26 +313,34 @@ func (t *dialTask) String() string { return fmt.Sprintf("%v %x %v:%d", t.flags, id[:8], t.dest.IP(), t.dest.TCP()) } +// discoverTask runs discovery table operations. +// Only one discoverTask is active at any time. +// discoverTask.Do performs a random lookup. +type discoverTask struct { + want int + results []*enode.Node +} + func (t *discoverTask) Do(srv *Server) { - // newTasks generates a lookup task whenever dynamic dials are - // necessary. Lookups need to take some time, otherwise the - // event loop spins too fast. - next := srv.lastLookup.Add(lookupInterval) - if now := time.Now(); now.Before(next) { - time.Sleep(next.Sub(now)) - } - srv.lastLookup = time.Now() - t.results = srv.ntab.LookupRandom() + t.results = enode.ReadNodes(srv.discmix, t.want) } func (t *discoverTask) String() string { - s := "discovery lookup" + s := "discovery query" if len(t.results) > 0 { s += fmt.Sprintf(" (%d results)", len(t.results)) + } else { + s += fmt.Sprintf(" (want %d)", t.want) } return s } +// A waitExpireTask is generated if there are no other tasks +// to keep the loop in Server.run ticking. 
+type waitExpireTask struct { + time.Duration +} + func (t waitExpireTask) Do(*Server) { time.Sleep(t.Duration) } diff --git a/p2p/dial_test.go b/p2p/dial_test.go index 198d5f9e8..48e170a4e 100644 --- a/p2p/dial_test.go +++ b/p2p/dial_test.go @@ -73,7 +73,7 @@ func runDialTest(t *testing.T, test dialtest) { t.Errorf("ERROR round %d: got %v\nwant %v\nstate: %v\nrunning: %v", i, spew.Sdump(new), spew.Sdump(round.new), spew.Sdump(test.init), spew.Sdump(running)) } - t.Logf("round %d new tasks: %s", i, strings.TrimSpace(spew.Sdump(new))) + t.Logf("round %d (running %d) new tasks: %s", i, running, strings.TrimSpace(spew.Sdump(new))) // Time advances by 16 seconds on every round. vtime = vtime.Add(16 * time.Second) @@ -81,19 +81,11 @@ func runDialTest(t *testing.T, test dialtest) { } } -type fakeTable []*enode.Node - -func (t fakeTable) Self() *enode.Node { return new(enode.Node) } -func (t fakeTable) Close() {} -func (t fakeTable) LookupRandom() []*enode.Node { return nil } -func (t fakeTable) Resolve(*enode.Node) *enode.Node { return nil } -func (t fakeTable) ReadRandomNodes(buf []*enode.Node) int { return copy(buf, t) } - // This test checks that dynamic dials are launched from discovery results. func TestDialStateDynDial(t *testing.T) { config := &Config{Logger: testlog.Logger(t, log.LvlTrace)} runDialTest(t, dialtest{ - init: newDialState(enode.ID{}, fakeTable{}, 5, config), + init: newDialState(enode.ID{}, 5, config), rounds: []round{ // A discovery query is launched. { @@ -102,7 +94,9 @@ func TestDialStateDynDial(t *testing.T) { {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}}, {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}}, }, - new: []task{&discoverTask{}}, + new: []task{ + &discoverTask{want: 3}, + }, }, // Dynamic dials are launched when it completes. 
{ @@ -188,7 +182,7 @@ func TestDialStateDynDial(t *testing.T) { }, new: []task{ &dialTask{flags: dynDialedConn, dest: newNode(uintID(7), nil)}, - &discoverTask{}, + &discoverTask{want: 2}, }, }, // Peer 7 is connected, but there still aren't enough dynamic peers @@ -218,7 +212,7 @@ func TestDialStateDynDial(t *testing.T) { &discoverTask{}, }, new: []task{ - &discoverTask{}, + &discoverTask{want: 2}, }, }, }, @@ -235,35 +229,37 @@ func TestDialStateDynDialBootnode(t *testing.T) { }, Logger: testlog.Logger(t, log.LvlTrace), } - table := fakeTable{ - newNode(uintID(4), nil), - newNode(uintID(5), nil), - newNode(uintID(6), nil), - newNode(uintID(7), nil), - newNode(uintID(8), nil), - } runDialTest(t, dialtest{ - init: newDialState(enode.ID{}, table, 5, config), + init: newDialState(enode.ID{}, 5, config), rounds: []round{ - // 2 dynamic dials attempted, bootnodes pending fallback interval { new: []task{ - &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)}, - &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)}, - &discoverTask{}, + &discoverTask{want: 5}, }, }, - // No dials succeed, bootnodes still pending fallback interval { done: []task{ + &discoverTask{ + results: []*enode.Node{ + newNode(uintID(4), nil), + newNode(uintID(5), nil), + }, + }, + }, + new: []task{ &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)}, &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)}, + &discoverTask{want: 3}, }, }, // No dials succeed, bootnodes still pending fallback interval {}, - // No dials succeed, 2 dynamic dials attempted and 1 bootnode too as fallback interval was reached + // 1 bootnode attempted as fallback interval was reached { + done: []task{ + &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)}, + &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)}, + }, new: []task{ &dialTask{flags: dynDialedConn, dest: newNode(uintID(1), nil)}, }, @@ -275,15 +271,12 @@ func TestDialStateDynDialBootnode(t *testing.T) { }, new: []task{ &dialTask{flags: dynDialedConn, dest: newNode(uintID(2), nil)}, - &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)}, - &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)}, }, }, // No dials succeed, 3rd bootnode is attempted { done: []task{ &dialTask{flags: dynDialedConn, dest: newNode(uintID(2), nil)}, - &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)}, }, new: []task{ &dialTask{flags: dynDialedConn, dest: newNode(uintID(3), nil)}, @@ -293,115 +286,19 @@ func TestDialStateDynDialBootnode(t *testing.T) { { done: []task{ &dialTask{flags: dynDialedConn, dest: newNode(uintID(3), nil)}, - }, - new: []task{}, - }, - // Random dial succeeds, no more bootnodes are attempted - { - new: []task{ - &waitExpireTask{3 * time.Second}, - }, - peers: []*Peer{ - {rw: &conn{flags: dynDialedConn, node: newNode(uintID(4), nil)}}, - }, - done: []task{ - &dialTask{flags: dynDialedConn, dest: newNode(uintID(1), nil)}, - &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)}, - }, - }, - }, - }) -} - -func TestDialStateDynDialFromTable(t *testing.T) { - // This table always returns the same random nodes - // in the order given below. 
- table := fakeTable{ - newNode(uintID(1), nil), - newNode(uintID(2), nil), - newNode(uintID(3), nil), - newNode(uintID(4), nil), - newNode(uintID(5), nil), - newNode(uintID(6), nil), - newNode(uintID(7), nil), - newNode(uintID(8), nil), - } - - runDialTest(t, dialtest{ - init: newDialState(enode.ID{}, table, 10, &Config{Logger: testlog.Logger(t, log.LvlTrace)}), - rounds: []round{ - // 5 out of 8 of the nodes returned by ReadRandomNodes are dialed. - { - new: []task{ - &dialTask{flags: dynDialedConn, dest: newNode(uintID(1), nil)}, - &dialTask{flags: dynDialedConn, dest: newNode(uintID(2), nil)}, - &dialTask{flags: dynDialedConn, dest: newNode(uintID(3), nil)}, - &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)}, - &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)}, - &discoverTask{}, - }, - }, - // Dialing nodes 1,2 succeeds. Dials from the lookup are launched. - { - peers: []*Peer{ - {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}}, - {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}}, - }, - done: []task{ - &dialTask{flags: dynDialedConn, dest: newNode(uintID(1), nil)}, - &dialTask{flags: dynDialedConn, dest: newNode(uintID(2), nil)}, &discoverTask{results: []*enode.Node{ - newNode(uintID(10), nil), - newNode(uintID(11), nil), - newNode(uintID(12), nil), + newNode(uintID(6), nil), }}, }, new: []task{ - &dialTask{flags: dynDialedConn, dest: newNode(uintID(10), nil)}, - &dialTask{flags: dynDialedConn, dest: newNode(uintID(11), nil)}, - &dialTask{flags: dynDialedConn, dest: newNode(uintID(12), nil)}, - &discoverTask{}, - }, - }, - // Dialing nodes 3,4,5 fails. The dials from the lookup succeed. - { - peers: []*Peer{ - {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}}, - {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}}, - {rw: &conn{flags: dynDialedConn, node: newNode(uintID(10), nil)}}, - {rw: &conn{flags: dynDialedConn, node: newNode(uintID(11), nil)}}, - {rw: &conn{flags: dynDialedConn, node: newNode(uintID(12), nil)}}, - }, - done: []task{ - &dialTask{flags: dynDialedConn, dest: newNode(uintID(3), nil)}, - &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)}, - &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)}, - &dialTask{flags: dynDialedConn, dest: newNode(uintID(10), nil)}, - &dialTask{flags: dynDialedConn, dest: newNode(uintID(11), nil)}, - &dialTask{flags: dynDialedConn, dest: newNode(uintID(12), nil)}, + &dialTask{flags: dynDialedConn, dest: newNode(uintID(6), nil)}, + &discoverTask{want: 4}, }, }, - // Waiting for expiry. No waitExpireTask is launched because the - // discovery query is still running. + // Random dial succeeds, no more bootnodes are attempted { peers: []*Peer{ - {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}}, - {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}}, - {rw: &conn{flags: dynDialedConn, node: newNode(uintID(10), nil)}}, - {rw: &conn{flags: dynDialedConn, node: newNode(uintID(11), nil)}}, - {rw: &conn{flags: dynDialedConn, node: newNode(uintID(12), nil)}}, - }, - }, - // Nodes 3,4 are not tried again because only the first two - // returned random nodes (nodes 1,2) are tried and they're - // already connected. 
- { + peers: []*Peer{ - {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}}, - {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}}, - {rw: &conn{flags: dynDialedConn, node: newNode(uintID(10), nil)}}, - {rw: &conn{flags: dynDialedConn, node: newNode(uintID(11), nil)}}, - {rw: &conn{flags: dynDialedConn, node: newNode(uintID(12), nil)}}, + {rw: &conn{flags: dynDialedConn, node: newNode(uintID(6), nil)}}, }, }, }, @@ -416,11 +313,11 @@ func newNode(id enode.ID, ip net.IP) *enode.Node { return enode.SignNull(&r, id) } -// This test checks that candidates that do not match the netrestrict list are not dialed. +// This test checks that candidates that do not match the netrestrict list are not dialed. func TestDialStateNetRestrict(t *testing.T) { // This table always returns the same random nodes // in the order given below. - table := fakeTable{ + nodes := []*enode.Node{ newNode(uintID(1), net.ParseIP("127.0.0.1")), newNode(uintID(2), net.ParseIP("127.0.0.2")), newNode(uintID(3), net.ParseIP("127.0.0.3")), @@ -434,12 +331,23 @@ func TestDialStateNetRestrict(t *testing.T) { restrict.Add("127.0.2.0/24") runDialTest(t, dialtest{ - init: newDialState(enode.ID{}, table, 10, &Config{NetRestrict: restrict}), + init: newDialState(enode.ID{}, 10, &Config{NetRestrict: restrict}), rounds: []round{ { new: []task{ - &dialTask{flags: dynDialedConn, dest: table[4]}, - &discoverTask{}, + &discoverTask{want: 10}, + }, + }, + { + done: []task{ + &discoverTask{results: nodes}, + }, + new: []task{ + &dialTask{flags: dynDialedConn, dest: nodes[4]}, + &dialTask{flags: dynDialedConn, dest: nodes[5]}, + &dialTask{flags: dynDialedConn, dest: nodes[6]}, + &dialTask{flags: dynDialedConn, dest: nodes[7]}, + &discoverTask{want: 6}, }, }, }, @@ -459,7 +367,7 @@ func TestDialStateStaticDial(t *testing.T) { Logger: testlog.Logger(t, log.LvlTrace), } runDialTest(t, dialtest{ - init: newDialState(enode.ID{}, fakeTable{}, 0, config), + init: newDialState(enode.ID{}, 0, config), rounds: []round{ // Static dials are launched for the nodes that // aren't yet connected. @@ -544,7 +452,7 @@ func TestDialStateCache(t *testing.T) { Logger: testlog.Logger(t, log.LvlTrace), } runDialTest(t, dialtest{ - init: newDialState(enode.ID{}, fakeTable{}, 0, config), + init: newDialState(enode.ID{}, 0, config), rounds: []round{ // Static dials are launched for the nodes that // aren't yet connected. @@ -618,8 +526,8 @@ func TestDialResolve(t *testing.T) { Dialer: TCPDialer{&net.Dialer{Deadline: time.Now().Add(-5 * time.Minute)}}, } resolved := newNode(uintID(1), net.IP{127, 0, 55, 234}) - table := &resolveMock{answer: resolved} - state := newDialState(enode.ID{}, table, 0, config) + resolver := &resolveMock{answer: resolved} + state := newDialState(enode.ID{}, 0, config) // Check that the task is generated with an incomplete ID. dest := newNode(uintID(1), nil) @@ -630,10 +538,14 @@ func TestDialResolve(t *testing.T) { } // Now run the task, it should resolve the ID once. - srv := &Server{ntab: table, log: config.Logger, Config: *config} + srv := &Server{ + Config: *config, + log: config.Logger, + staticNodeResolver: resolver, + } tasks[0].Do(srv) - if !reflect.DeepEqual(table.resolveCalls, []*enode.Node{dest}) { - t.Fatalf("wrong resolve calls, got %v", table.resolveCalls) + if !reflect.DeepEqual(resolver.calls, []*enode.Node{dest}) { - t.Fatalf("wrong resolve calls, got %v", resolver.calls) } // Report it as done to the dialer, which should update the static node record.
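(Hedged sketch, not part of the patch: the test rounds above exercise the reworked dialer, in which dynamic dial candidates now arrive as discovery-query results sized by a `want` count instead of being read from the Kademlia table. The snippet below is a rough, package-internal illustration of the first scheduling round; it uses the unexported helpers exactly as the tests do and assumes a zero-value `Config` is acceptable for this purpose.)

```go
// Sketch: first scheduling round of the reworked dialer with no peers and an
// empty lookup buffer (package p2p, internal API as used by the tests above).
func exampleFirstRound() []task {
	state := newDialState(enode.ID{}, 3, &Config{})

	// With zero running tasks and no connected peers, all three dynamic dial
	// slots are free, so the only dynamic task produced is a discovery query
	// asking for three candidates: []task{&discoverTask{want: 3}}.
	return state.newTasks(0, nil, time.Now())
}
```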
@@ -666,18 +578,13 @@ func uintID(i uint32) enode.ID { return id } -// implements discoverTable for TestDialResolve +// for TestDialResolve type resolveMock struct { - resolveCalls []*enode.Node - answer *enode.Node + calls []*enode.Node + answer *enode.Node } func (t *resolveMock) Resolve(n *enode.Node) *enode.Node { - t.resolveCalls = append(t.resolveCalls, n) + t.calls = append(t.calls, n) return t.answer } - -func (t *resolveMock) Self() *enode.Node { return new(enode.Node) } -func (t *resolveMock) Close() {} -func (t *resolveMock) LookupRandom() []*enode.Node { return nil } -func (t *resolveMock) ReadRandomNodes(buf []*enode.Node) int { return 0 } diff --git a/p2p/discover/common.go b/p2p/discover/common.go index 2180d9620..abfa911c0 100644 --- a/p2p/discover/common.go +++ b/p2p/discover/common.go @@ -25,6 +25,7 @@ import ( "github.com/ethersocial/go-ethersocial/p2p/netutil" ) +// UDPConn is a network connection on which discovery can operate. type UDPConn interface { ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) WriteToUDP(b []byte, addr *net.UDPAddr) (n int, err error) @@ -32,7 +33,7 @@ type UDPConn interface { LocalAddr() net.Addr } -// Config holds Table-related settings. +// Config holds settings for the discovery listener. type Config struct { // These settings are required and configure the UDP listener: PrivateKey *ecdsa.PrivateKey @@ -50,7 +51,7 @@ func ListenUDP(c UDPConn, ln *enode.LocalNode, cfg Config) (*UDPv4, error) { } // ReadPacket is a packet that couldn't be handled. Those packets are sent to the unhandled -// channel if configured. +// channel if configured. This is exported for internal use, do not use this type. type ReadPacket struct { Data []byte Addr *net.UDPAddr diff --git a/p2p/discover/lookup.go b/p2p/discover/lookup.go new file mode 100644 index 000000000..157b1fcb9 --- /dev/null +++ b/p2p/discover/lookup.go @@ -0,0 +1,209 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package discover + +import ( + "context" + + "github.com/ethersocial/go-ethersocial/p2p/enode" +) + +// lookup performs a network search for nodes close to the given target. It approaches the +// target by querying nodes that are closer to it on each iteration. The given target does +// not need to be an actual node identifier. 
+type lookup struct { + tab *Table + queryfunc func(*node) ([]*node, error) + replyCh chan []*node + cancelCh <-chan struct{} + asked, seen map[enode.ID]bool + result nodesByDistance + replyBuffer []*node + queries int +} + +type queryFunc func(*node) ([]*node, error) + +func newLookup(ctx context.Context, tab *Table, target enode.ID, q queryFunc) *lookup { + it := &lookup{ + tab: tab, + queryfunc: q, + asked: make(map[enode.ID]bool), + seen: make(map[enode.ID]bool), + result: nodesByDistance{target: target}, + replyCh: make(chan []*node, alpha), + cancelCh: ctx.Done(), + queries: -1, + } + // Don't query further if we hit ourself. + // Unlikely to happen often in practice. + it.asked[tab.self().ID()] = true + return it +} + +// run runs the lookup to completion and returns the closest nodes found. +func (it *lookup) run() []*enode.Node { + for it.advance() { + } + return unwrapNodes(it.result.entries) +} + +// advance advances the lookup until any new nodes have been found. +// It returns false when the lookup has ended. +func (it *lookup) advance() bool { + for it.startQueries() { + select { + case nodes := <-it.replyCh: + it.replyBuffer = it.replyBuffer[:0] + for _, n := range nodes { + if n != nil && !it.seen[n.ID()] { + it.seen[n.ID()] = true + it.result.push(n, bucketSize) + it.replyBuffer = append(it.replyBuffer, n) + } + } + it.queries-- + if len(it.replyBuffer) > 0 { + return true + } + case <-it.cancelCh: + it.shutdown() + } + } + return false +} + +func (it *lookup) shutdown() { + for it.queries > 0 { + <-it.replyCh + it.queries-- + } + it.queryfunc = nil + it.replyBuffer = nil +} + +func (it *lookup) startQueries() bool { + if it.queryfunc == nil { + return false + } + + // The first query returns nodes from the local table. + if it.queries == -1 { + it.tab.mutex.Lock() + closest := it.tab.closest(it.result.target, bucketSize, false) + it.tab.mutex.Unlock() + it.queries = 1 + it.replyCh <- closest.entries + return true + } + + // Ask the closest nodes that we haven't asked yet. + for i := 0; i < len(it.result.entries) && it.queries < alpha; i++ { + n := it.result.entries[i] + if !it.asked[n.ID()] { + it.asked[n.ID()] = true + it.queries++ + go it.query(n, it.replyCh) + } + } + // The lookup ends when no more nodes can be asked. + return it.queries > 0 +} + +func (it *lookup) query(n *node, reply chan<- []*node) { + fails := it.tab.db.FindFails(n.ID(), n.IP()) + r, err := it.queryfunc(n) + if err == errClosed { + // Avoid recording failures on shutdown. + reply <- nil + return + } else if len(r) == 0 { + fails++ + it.tab.db.UpdateFindFails(n.ID(), n.IP(), fails) + it.tab.log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "err", err) + if fails >= maxFindnodeFailures { + it.tab.log.Trace("Too many findnode failures, dropping", "id", n.ID(), "failcount", fails) + it.tab.delete(n) + } + } else if fails > 0 { + // Reset failure counter because it counts _consecutive_ failures. + it.tab.db.UpdateFindFails(n.ID(), n.IP(), 0) + } + + // Grab as many nodes as possible. Some of them might not be alive anymore, but we'll + // just remove those again during revalidation. + for _, n := range r { + it.tab.addSeenNode(n) + } + reply <- r +} + +// lookupIterator performs lookup operations and iterates over all seen nodes. +// When a lookup finishes, a new one is created through nextLookup. 
+type lookupIterator struct { + buffer []*node + nextLookup lookupFunc + ctx context.Context + cancel func() + lookup *lookup +} + +type lookupFunc func(ctx context.Context) *lookup + +func newLookupIterator(ctx context.Context, next lookupFunc) *lookupIterator { + ctx, cancel := context.WithCancel(ctx) + return &lookupIterator{ctx: ctx, cancel: cancel, nextLookup: next} +} + +// Node returns the current node. +func (it *lookupIterator) Node() *enode.Node { + if len(it.buffer) == 0 { + return nil + } + return unwrapNode(it.buffer[0]) +} + +// Next moves to the next node. +func (it *lookupIterator) Next() bool { + // Consume next node in buffer. + if len(it.buffer) > 0 { + it.buffer = it.buffer[1:] + } + // Advance the lookup to refill the buffer. + for len(it.buffer) == 0 { + if it.ctx.Err() != nil { + it.lookup = nil + it.buffer = nil + return false + } + if it.lookup == nil { + it.lookup = it.nextLookup(it.ctx) + continue + } + if !it.lookup.advance() { + it.lookup = nil + continue + } + it.buffer = it.lookup.replyBuffer + } + return true +} + +// Close ends the iterator. +func (it *lookupIterator) Close() { + it.cancel() +} diff --git a/p2p/discover/table_util_test.go b/p2p/discover/table_util_test.go index 792c47c71..4ff8b8f3b 100644 --- a/p2p/discover/table_util_test.go +++ b/p2p/discover/table_util_test.go @@ -17,11 +17,14 @@ package discover import ( + "bytes" "crypto/ecdsa" "encoding/hex" + "errors" "fmt" "math/rand" "net" + "reflect" "sort" "sync" @@ -169,6 +172,28 @@ func hasDuplicates(slice []*node) bool { return false } +func checkNodesEqual(got, want []*enode.Node) error { + if reflect.DeepEqual(got, want) { + return nil + } + output := new(bytes.Buffer) + fmt.Fprintf(output, "got %d nodes:\n", len(got)) + for _, n := range got { + fmt.Fprintf(output, " %v %v\n", n.ID(), n) + } + fmt.Fprintf(output, "want %d:\n", len(want)) + for _, n := range want { + fmt.Fprintf(output, " %v %v\n", n.ID(), n) + } + return errors.New(output.String()) +} + +func sortByID(nodes []*enode.Node) { + sort.Slice(nodes, func(i, j int) bool { + return string(nodes[i].ID().Bytes()) < string(nodes[j].ID().Bytes()) + }) +} + func sortedByDistanceTo(distbase enode.ID, slice []*node) bool { return sort.SliceIsSorted(slice, func(i, j int) bool { return enode.DistCmp(distbase, slice[i].ID(), slice[j].ID()) < 0 diff --git a/p2p/discover/v4_udp_lookup_test.go b/p2p/discover/v4_lookup_test.go similarity index 75% rename from p2p/discover/v4_udp_lookup_test.go rename to p2p/discover/v4_lookup_test.go index 06e37745c..8d88c36c9 100644 --- a/p2p/discover/v4_udp_lookup_test.go +++ b/p2p/discover/v4_lookup_test.go @@ -20,7 +20,6 @@ import ( "crypto/ecdsa" "fmt" "net" - "reflect" "sort" "testing" @@ -49,19 +48,7 @@ func TestUDPv4_Lookup(t *testing.T) { }() // Answer lookup packets. - for done := false; !done; { - done = test.waitPacketOut(func(p packetV4, to *net.UDPAddr, hash []byte) { - n, key := lookupTestnet.nodeByAddr(to) - switch p.(type) { - case *pingV4: - test.packetInFrom(nil, key, to, &pongV4{Expiration: futureExp, ReplyTok: hash}) - case *findnodeV4: - dist := enode.LogDist(n.ID(), lookupTestnet.target.id()) - nodes := lookupTestnet.nodesAtDistance(dist - 1) - test.packetInFrom(nil, key, to, &neighborsV4{Expiration: futureExp, Nodes: nodes}) - } - }) - } + serveTestnet(test, lookupTestnet) // Verify result nodes. 
results := <-resultC @@ -78,8 +65,94 @@ func TestUDPv4_Lookup(t *testing.T) { if !sortedByDistanceTo(lookupTestnet.target.id(), wrapNodes(results)) { t.Errorf("result set not sorted by distance to target") } - if !reflect.DeepEqual(results, lookupTestnet.closest(bucketSize)) { - t.Errorf("results aren't the closest %d nodes", bucketSize) + if err := checkNodesEqual(results, lookupTestnet.closest(bucketSize)); err != nil { + t.Errorf("results aren't the closest %d nodes\n%v", bucketSize, err) + } +} + +func TestUDPv4_LookupIterator(t *testing.T) { + t.Parallel() + test := newUDPTest(t) + defer test.close() + + // Seed table with initial nodes. + bootnodes := make([]*node, len(lookupTestnet.dists[256])) + for i := range lookupTestnet.dists[256] { + bootnodes[i] = wrapNode(lookupTestnet.node(256, i)) + } + fillTable(test.table, bootnodes) + go serveTestnet(test, lookupTestnet) + + // Create the iterator and collect the nodes it yields. + iter := test.udp.RandomNodes() + seen := make(map[enode.ID]*enode.Node) + for limit := lookupTestnet.len(); iter.Next() && len(seen) < limit; { + seen[iter.Node().ID()] = iter.Node() + } + iter.Close() + + // Check that all nodes in lookupTestnet were seen by the iterator. + results := make([]*enode.Node, 0, len(seen)) + for _, n := range seen { + results = append(results, n) + } + sortByID(results) + want := lookupTestnet.nodes() + if err := checkNodesEqual(results, want); err != nil { + t.Fatal(err) + } +} + +// TestUDPv4_LookupIteratorClose checks that lookupIterator ends when its Close +// method is called. +func TestUDPv4_LookupIteratorClose(t *testing.T) { + t.Parallel() + test := newUDPTest(t) + defer test.close() + + // Seed table with initial nodes. + bootnodes := make([]*node, len(lookupTestnet.dists[256])) + for i := range lookupTestnet.dists[256] { + bootnodes[i] = wrapNode(lookupTestnet.node(256, i)) + } + fillTable(test.table, bootnodes) + go serveTestnet(test, lookupTestnet) + + it := test.udp.RandomNodes() + if ok := it.Next(); !ok || it.Node() == nil { + t.Fatalf("iterator didn't return any node") + } + + it.Close() + + ncalls := 0 + for ; ncalls < 100 && it.Next(); ncalls++ { + if it.Node() == nil { + t.Error("iterator returned Node() == nil node after Next() == true") + } + } + t.Logf("iterator returned %d nodes after close", ncalls) + if it.Next() { + t.Errorf("Next() == true after close and %d more calls", ncalls) + } + if n := it.Node(); n != nil { + t.Errorf("iterator returned non-nil node after close and %d more calls", ncalls) + } +} + +func serveTestnet(test *udpTest, testnet *preminedTestnet) { + for done := false; !done; { + done = test.waitPacketOut(func(p packetV4, to *net.UDPAddr, hash []byte) { + n, key := testnet.nodeByAddr(to) + switch p.(type) { + case *pingV4: + test.packetInFrom(nil, key, to, &pongV4{Expiration: futureExp, ReplyTok: hash}) + case *findnodeV4: + dist := enode.LogDist(n.ID(), testnet.target.id()) + nodes := testnet.nodesAtDistance(dist - 1) + test.packetInFrom(nil, key, to, &neighborsV4{Expiration: futureExp, Nodes: nodes}) + } + }) } } @@ -148,6 +221,25 @@ type preminedTestnet struct { dists [hashBits + 1][]*ecdsa.PrivateKey } +func (tn *preminedTestnet) len() int { + n := 0 + for _, keys := range tn.dists { + n += len(keys) + } + return n +} + +func (tn *preminedTestnet) nodes() []*enode.Node { + result := make([]*enode.Node, 0, tn.len()) + for dist, keys := range tn.dists { + for index := range keys { + result = append(result, tn.node(dist, index)) + } + } + sortByID(result) + return result +} + func (tn 
*preminedTestnet) node(dist, index int) *enode.Node { key := tn.dists[dist][index] ip := net.IP{127, byte(dist >> 8), byte(dist), byte(index)} diff --git a/p2p/discover/v4_udp.go b/p2p/discover/v4_udp.go index 58b51267c..b49e35684 100644 --- a/p2p/discover/v4_udp.go +++ b/p2p/discover/v4_udp.go @@ -19,6 +19,7 @@ package discover import ( "bytes" "container/list" + "context" "crypto/ecdsa" crand "crypto/rand" "errors" @@ -207,7 +208,8 @@ type UDPv4 struct { addReplyMatcher chan *replyMatcher gotreply chan reply - closing chan struct{} + closeCtx context.Context + cancelCloseCtx func() } // replyMatcher represents a pending reply. @@ -256,20 +258,23 @@ type reply struct { } func ListenV4(c UDPConn, ln *enode.LocalNode, cfg Config) (*UDPv4, error) { + closeCtx, cancel := context.WithCancel(context.Background()) t := &UDPv4{ conn: c, priv: cfg.PrivateKey, netrestrict: cfg.NetRestrict, localNode: ln, db: ln.Database(), - closing: make(chan struct{}), gotreply: make(chan reply), addReplyMatcher: make(chan *replyMatcher), + closeCtx: closeCtx, + cancelCloseCtx: cancel, log: cfg.Log, } if t.log == nil { t.log = log.Root() } + tab, err := newTable(t, ln.Database(), cfg.Bootnodes, t.log) if err != nil { return nil, err @@ -291,126 +296,13 @@ func (t *UDPv4) Self() *enode.Node { // Close shuts down the socket and aborts any running queries. func (t *UDPv4) Close() { t.closeOnce.Do(func() { - close(t.closing) + t.cancelCloseCtx() t.conn.Close() t.wg.Wait() t.tab.close() }) } -// ReadRandomNodes reads random nodes from the local table. -func (t *UDPv4) ReadRandomNodes(buf []*enode.Node) int { - return t.tab.ReadRandomNodes(buf) -} - -// LookupRandom finds random nodes in the network. -func (t *UDPv4) LookupRandom() []*enode.Node { - if t.tab.len() == 0 { - // All nodes were dropped, refresh. The very first query will hit this - // case and run the bootstrapping logic. - <-t.tab.refresh() - } - return t.lookupRandom() -} - -func (t *UDPv4) LookupPubkey(key *ecdsa.PublicKey) []*enode.Node { - if t.tab.len() == 0 { - // All nodes were dropped, refresh. The very first query will hit this - // case and run the bootstrapping logic. - <-t.tab.refresh() - } - return unwrapNodes(t.lookup(encodePubkey(key))) -} - -func (t *UDPv4) lookupRandom() []*enode.Node { - var target encPubkey - crand.Read(target[:]) - return unwrapNodes(t.lookup(target)) -} - -func (t *UDPv4) lookupSelf() []*enode.Node { - return unwrapNodes(t.lookup(encodePubkey(&t.priv.PublicKey))) -} - -// lookup performs a network search for nodes close to the given target. It approaches the -// target by querying nodes that are closer to it on each iteration. The given target does -// not need to be an actual node identifier. -func (t *UDPv4) lookup(targetKey encPubkey) []*node { - var ( - target = enode.ID(crypto.Keccak256Hash(targetKey[:])) - asked = make(map[enode.ID]bool) - seen = make(map[enode.ID]bool) - reply = make(chan []*node, alpha) - pendingQueries = 0 - result *nodesByDistance - ) - // Don't query further if we hit ourself. - // Unlikely to happen often in practice. - asked[t.Self().ID()] = true - - // Generate the initial result set. 
- t.tab.mutex.Lock() - result = t.tab.closest(target, bucketSize, false) - t.tab.mutex.Unlock() - - for { - // ask the alpha closest nodes that we haven't asked yet - for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ { - n := result.entries[i] - if !asked[n.ID()] { - asked[n.ID()] = true - pendingQueries++ - go t.lookupWorker(n, targetKey, reply) - } - } - if pendingQueries == 0 { - // we have asked all closest nodes, stop the search - break - } - select { - case nodes := <-reply: - for _, n := range nodes { - if n != nil && !seen[n.ID()] { - seen[n.ID()] = true - result.push(n, bucketSize) - } - } - case <-t.tab.closeReq: - return nil // shutdown, no need to continue. - } - pendingQueries-- - } - return result.entries -} - -func (t *UDPv4) lookupWorker(n *node, targetKey encPubkey, reply chan<- []*node) { - fails := t.db.FindFails(n.ID(), n.IP()) - r, err := t.findnode(n.ID(), n.addr(), targetKey) - if err == errClosed { - // Avoid recording failures on shutdown. - reply <- nil - return - } else if len(r) == 0 { - fails++ - t.db.UpdateFindFails(n.ID(), n.IP(), fails) - t.log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "err", err) - if fails >= maxFindnodeFailures { - t.log.Trace("Too many findnode failures, dropping", "id", n.ID(), "failcount", fails) - t.tab.delete(n) - } - } else if fails > 0 { - // Reset failure counter because it counts _consecutive_ failures. - t.db.UpdateFindFails(n.ID(), n.IP(), 0) - } - - // Grab as many nodes as possible. Some of them might not be alive anymore, but we'll - // just remove those again during revalidation. - for _, n := range r { - t.tab.addSeenNode(n) - } - reply <- r -} - // Resolve searches for a specific node with the given ID and tries to get the most recent // version of the node record for it. It returns n if the node could not be resolved. func (t *UDPv4) Resolve(n *enode.Node) *enode.Node { @@ -498,6 +390,45 @@ func (t *UDPv4) makePing(toaddr *net.UDPAddr) *pingV4 { } } +// LookupPubkey finds the closest nodes to the given public key. +func (t *UDPv4) LookupPubkey(key *ecdsa.PublicKey) []*enode.Node { + if t.tab.len() == 0 { + // All nodes were dropped, refresh. The very first query will hit this + // case and run the bootstrapping logic. + <-t.tab.refresh() + } + return t.newLookup(t.closeCtx, encodePubkey(key)).run() +} + +// RandomNodes is an iterator yielding nodes from a random walk of the DHT. +func (t *UDPv4) RandomNodes() enode.Iterator { + return newLookupIterator(t.closeCtx, t.newRandomLookup) +} + +// lookupRandom implements transport. +func (t *UDPv4) lookupRandom() []*enode.Node { + return t.newRandomLookup(t.closeCtx).run() +} + +// lookupSelf implements transport. +func (t *UDPv4) lookupSelf() []*enode.Node { + return t.newLookup(t.closeCtx, encodePubkey(&t.priv.PublicKey)).run() +} + +func (t *UDPv4) newRandomLookup(ctx context.Context) *lookup { + var target encPubkey + crand.Read(target[:]) + return t.newLookup(ctx, target) +} + +func (t *UDPv4) newLookup(ctx context.Context, targetKey encPubkey) *lookup { + target := enode.ID(crypto.Keccak256Hash(targetKey[:])) + it := newLookup(ctx, t.tab, target, func(n *node) ([]*node, error) { + return t.findnode(n.ID(), n.addr(), targetKey) + }) + return it +} + // findnode sends a findnode request to the given node and waits until // the node has sent up to k neighbors. 
func (t *UDPv4) findnode(toid enode.ID, toaddr *net.UDPAddr, target encPubkey) ([]*node, error) { @@ -575,7 +506,7 @@ func (t *UDPv4) pending(id enode.ID, ip net.IP, ptype byte, callback replyMatchF select { case t.addReplyMatcher <- p: // loop will handle it - case <-t.closing: + case <-t.closeCtx.Done(): ch <- errClosed } return p @@ -589,7 +520,7 @@ func (t *UDPv4) handleReply(from enode.ID, fromIP net.IP, req packetV4) bool { case t.gotreply <- reply{from, fromIP, req, matched}: // loop will handle it return <-matched - case <-t.closing: + case <-t.closeCtx.Done(): return false } } @@ -635,7 +566,7 @@ func (t *UDPv4) loop() { resetTimeout() select { - case <-t.closing: + case <-t.closeCtx.Done(): for el := plist.Front(); el != nil; el = el.Next() { el.Value.(*replyMatcher).errc <- errClosed } diff --git a/p2p/dnsdisc/client.go b/p2p/dnsdisc/client.go index e003caf71..45cc4b20c 100644 --- a/p2p/dnsdisc/client.go +++ b/p2p/dnsdisc/client.go @@ -107,7 +107,7 @@ func NewClient(cfg Config, urls ...string) (*Client, error) { // SyncTree downloads the entire node tree at the given URL. This doesn't add the tree for // later use, but any previously-synced entries are reused. func (c *Client) SyncTree(url string) (*Tree, error) { - le, err := parseURL(url) + le, err := parseLink(url) if err != nil { return nil, fmt.Errorf("invalid enrtree URL: %v", err) } @@ -122,7 +122,7 @@ func (c *Client) SyncTree(url string) (*Tree, error) { // AddTree adds a enrtree:// URL to crawl. func (c *Client) AddTree(url string) error { - le, err := parseURL(url) + le, err := parseLink(url) if err != nil { return fmt.Errorf("invalid enrtree URL: %v", err) } diff --git a/p2p/dnsdisc/client_test.go b/p2p/dnsdisc/client_test.go index 4ce8ca5ee..1a8802f91 100644 --- a/p2p/dnsdisc/client_test.go +++ b/p2p/dnsdisc/client_test.go @@ -41,12 +41,12 @@ const ( func TestClientSyncTree(t *testing.T) { r := mapResolver{ - "3CA2MBMUQ55ZCT74YEEQLANJDI.n": "enr=-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI=", - "53HBTPGGZ4I76UEPCNQGZWIPTQ.n": "enr=-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA=", - "BG7SVUBUAJ3UAWD2ATEBLMRNEE.n": "enrtree=53HBTPGGZ4I76UEPCNQGZWIPTQ,3CA2MBMUQ55ZCT74YEEQLANJDI,HNHR6UTVZF5TJKK3FV27ZI76P4", - "HNHR6UTVZF5TJKK3FV27ZI76P4.n": "enr=-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o=", - "JGUFMSAGI7KZYB3P7IZW4S5Y3A.n": "enrtree-link=AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org", - "n": "enrtree-root=v1 e=BG7SVUBUAJ3UAWD2ATEBLMRNEE l=JGUFMSAGI7KZYB3P7IZW4S5Y3A seq=1 sig=gacuU0nTy9duIdu1IFDyF5Lv9CFHqHiNcj91n0frw70tZo3tZZsCVkE3j1ILYyVOHRLWGBmawo_SEkThZ9PgcQE=", + "n": "enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA", + "C7HRFPF3BLGF3YR4DY5KX3SMBE.n": "enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org", + "JWXYDBPXYWG6FX3GMDIBFA6CJ4.n": "enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24", + "2XS2367YHAXJFGLZHVAWLQD4ZY.n": 
"enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA", + "H4FHT4B454P6UXFD7JCYQ5PWDY.n": "enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI", + "MHTDO6TMUBRIA2XWG5LUDACK24.n": "enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o", } var ( wantNodes = testNodes(0x29452, 3) @@ -75,15 +75,25 @@ func TestClientSyncTree(t *testing.T) { // In this test, syncing the tree fails because it contains an invalid ENR entry. func TestClientSyncTreeBadNode(t *testing.T) { + // var b strings.Builder + // b.WriteString(enrPrefix) + // b.WriteString("-----") + // badHash := subdomain(&b) + // tree, _ := MakeTree(3, nil, []string{"enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org"}) + // tree.entries[badHash] = &b + // tree.root.eroot = badHash + // url, _ := tree.Sign(testKey(signingKeySeed), "n") + // fmt.Println(url) + // fmt.Printf("%#v\n", tree.ToTXT("n")) + r := mapResolver{ - "n": "enrtree-root=v1 e=ZFJZDQKSOMJRYYQSZKJZC54HCF l=JGUFMSAGI7KZYB3P7IZW4S5Y3A seq=3 sig=WEy8JTZ2dHmXM2qeBZ7D2ECK7SGbnurl1ge_S_5GQBAqnADk0gLTcg8Lm5QNqLHZjJKGAb443p996idlMcBqEQA=", - "JGUFMSAGI7KZYB3P7IZW4S5Y3A.n": "enrtree-link=AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org", - "ZFJZDQKSOMJRYYQSZKJZC54HCF.n": "enr=gggggggggggggg=", + "n": "enrtree-root:v1 e=INDMVBZEEQ4ESVYAKGIYU74EAA l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=3 sig=Vl3AmunLur0JZ3sIyJPSH6A3Vvdp4F40jWQeCmkIhmcgwE4VC5U9wpK8C_uL_CMY29fd6FAhspRvq2z_VysTLAA", + "C7HRFPF3BLGF3YR4DY5KX3SMBE.n": "enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org", + "INDMVBZEEQ4ESVYAKGIYU74EAA.n": "enr:-----", } - c, _ := NewClient(Config{Resolver: r, Logger: testlog.Logger(t, log.LvlTrace)}) - _, err := c.SyncTree("enrtree://APFGGTFOBVE2ZNAB3CSMNNX6RRK3ODIRLP2AA5U4YFAA6MSYZUYTQ@n") - wantErr := nameError{name: "ZFJZDQKSOMJRYYQSZKJZC54HCF.n", err: entryError{typ: "enr", err: errInvalidENR}} + _, err := c.SyncTree("enrtree://AKPYQIUQIL7PSIACI32J7FGZW56E5FKHEFCCOFHILBIMW3M6LWXS2@n") + wantErr := nameError{name: "INDMVBZEEQ4ESVYAKGIYU74EAA.n", err: entryError{typ: "enr", err: errInvalidENR}} if err != wantErr { t.Fatalf("expected sync error %q, got %q", wantErr, err) } diff --git a/p2p/dnsdisc/sync.go b/p2p/dnsdisc/sync.go index fb812a3b8..bfa9bcdc1 100644 --- a/p2p/dnsdisc/sync.go +++ b/p2p/dnsdisc/sync.go @@ -120,7 +120,7 @@ func (ct *clientTree) syncNextRandomENR(ctx context.Context) (*enode.Node, error } func (ct *clientTree) String() string { - return ct.loc.url() + return ct.loc.String() } // removeHash removes the element at index from h. @@ -209,7 +209,7 @@ func (ts *subtreeSync) resolveNext(ctx context.Context, hash string) (entry, err if !ts.link { return nil, errLinkInENRTree } - case *subtreeEntry: + case *branchEntry: ts.missing = append(ts.missing, e.children...) 
} return e, nil diff --git a/p2p/dnsdisc/tree.go b/p2p/dnsdisc/tree.go index 2e2ae26a8..0036fef25 100644 --- a/p2p/dnsdisc/tree.go +++ b/p2p/dnsdisc/tree.go @@ -49,7 +49,7 @@ func (t *Tree) Sign(key *ecdsa.PrivateKey, domain string) (url string, err error root.sig = sig t.root = &root link := &linkEntry{domain, &key.PublicKey} - return link.url(), nil + return link.String(), nil } // SetSignature verifies the given signature and assigns it as the tree's current @@ -96,7 +96,7 @@ func (t *Tree) Links() []string { var links []string for _, e := range t.entries { if le, ok := e.(*linkEntry); ok { - links = append(links, le.url()) + links = append(links, le.String()) } } return links @@ -115,15 +115,15 @@ func (t *Tree) Nodes() []*enode.Node { const ( hashAbbrev = 16 - maxChildren = 300 / (hashAbbrev * (13 / 8)) + maxChildren = 300 / hashAbbrev * (13 / 8) minHashLength = 12 - rootPrefix = "enrtree-root=v1" ) // MakeTree creates a tree containing the given nodes and links. func MakeTree(seq uint, nodes []*enode.Node, links []string) (*Tree, error) { // Sort records by ID and ensure all nodes have a valid record. records := make([]*enode.Node, len(nodes)) + copy(records, nodes) sortByID(records) for _, n := range records { @@ -139,7 +139,7 @@ func MakeTree(seq uint, nodes []*enode.Node, links []string) (*Tree, error) { } linkEntries := make([]entry, len(links)) for i, l := range links { - le, err := parseURL(l) + le, err := parseLink(l) if err != nil { return nil, err } @@ -166,7 +166,7 @@ func (t *Tree) build(entries []entry) entry { hashes[i] = subdomain(e) t.entries[hashes[i]] = e } - return &subtreeEntry{hashes} + return &branchEntry{hashes} } var subtrees []entry for len(entries) > 0 { @@ -202,7 +202,7 @@ type ( seq uint sig []byte } - subtreeEntry struct { + branchEntry struct { children []string } enrEntry struct { @@ -218,7 +218,14 @@ type ( var ( b32format = base32.StdEncoding.WithPadding(base32.NoPadding) - b64format = base64.URLEncoding + b64format = base64.RawURLEncoding +) + +const ( + rootPrefix = "enrtree-root:v1" + linkPrefix = "enrtree://" + branchPrefix = "enrtree-branch:" + enrPrefix = "enr:" ) func subdomain(e entry) string { @@ -242,37 +249,29 @@ func (e *rootEntry) verifySignature(pubkey *ecdsa.PublicKey) bool { return crypto.VerifySignature(crypto.FromECDSAPub(pubkey), e.sigHash(), sig) } -func (e *subtreeEntry) String() string { - return "enrtree=" + strings.Join(e.children, ",") +func (e *branchEntry) String() string { + return branchPrefix + strings.Join(e.children, ",") } func (e *enrEntry) String() string { - enc, _ := rlp.EncodeToBytes(e.node.Record()) - return "enr=" + b64format.EncodeToString(enc) + return e.node.String() } func (e *linkEntry) String() string { - return "enrtree-link=" + e.link() -} - -func (e *linkEntry) url() string { - return "enrtree://" + e.link() -} - -func (e *linkEntry) link() string { - return fmt.Sprintf("%s@%s", b32format.EncodeToString(crypto.CompressPubkey(e.pubkey)), e.domain) + pubkey := b32format.EncodeToString(crypto.CompressPubkey(e.pubkey)) + return fmt.Sprintf("%s%s@%s", linkPrefix, pubkey, e.domain) } // Entry Parsing func parseEntry(e string, validSchemes enr.IdentityScheme) (entry, error) { switch { - case strings.HasPrefix(e, "enrtree-link="): - return parseLink(e[13:]) - case strings.HasPrefix(e, "enrtree="): - return parseSubtree(e[8:]) - case strings.HasPrefix(e, "enr="): - return parseENR(e[4:], validSchemes) + case strings.HasPrefix(e, linkPrefix): + return parseLinkEntry(e) + case strings.HasPrefix(e, branchPrefix): + 
return parseBranch(e) + case strings.HasPrefix(e, enrPrefix): + return parseENR(e, validSchemes) default: return nil, errUnknownEntry } @@ -294,7 +293,19 @@ func parseRoot(e string) (rootEntry, error) { return rootEntry{eroot, lroot, seq, sigb}, nil } -func parseLink(e string) (entry, error) { +func parseLinkEntry(e string) (entry, error) { + le, err := parseLink(e) + if err != nil { + return nil, err + } + return le, nil +} + +func parseLink(e string) (*linkEntry, error) { + if !strings.HasPrefix(e, linkPrefix) { + return nil, fmt.Errorf("wrong/missing scheme 'enrtree' in URL") + } + e = e[len(linkPrefix):] pos := strings.IndexByte(e, '@') if pos == -1 { return nil, entryError{"link", errNoPubkey} @@ -311,21 +322,23 @@ func parseLink(e string) (entry, error) { return &linkEntry{domain, key}, nil } -func parseSubtree(e string) (entry, error) { +func parseBranch(e string) (entry, error) { + e = e[len(branchPrefix):] if e == "" { - return &subtreeEntry{}, nil // empty entry is OK + return &branchEntry{}, nil // empty entry is OK } hashes := make([]string, 0, strings.Count(e, ",")) for _, c := range strings.Split(e, ",") { if !isValidHash(c) { - return nil, entryError{"subtree", errInvalidChild} + return nil, entryError{"branch", errInvalidChild} } hashes = append(hashes, c) } - return &subtreeEntry{hashes}, nil + return &branchEntry{hashes}, nil } func parseENR(e string, validSchemes enr.IdentityScheme) (entry, error) { + e = e[len(enrPrefix):] enc, err := b64format.DecodeString(e) if err != nil { return nil, entryError{"enr", errInvalidENR} @@ -364,21 +377,9 @@ func truncateHash(hash string) string { // ParseURL parses an enrtree:// URL and returns its components. func ParseURL(url string) (domain string, pubkey *ecdsa.PublicKey, err error) { - le, err := parseURL(url) + le, err := parseLink(url) if err != nil { return "", nil, err } return le.domain, le.pubkey, nil } - -func parseURL(url string) (*linkEntry, error) { - const scheme = "enrtree://" - if !strings.HasPrefix(url, scheme) { - return nil, fmt.Errorf("wrong/missing scheme 'enrtree' in URL") - } - le, err := parseLink(url[len(scheme):]) - if err != nil { - return nil, err.(entryError).err - } - return le.(*linkEntry), nil -} diff --git a/p2p/dnsdisc/tree_test.go b/p2p/dnsdisc/tree_test.go index 72a73f92b..900768e41 100644 --- a/p2p/dnsdisc/tree_test.go +++ b/p2p/dnsdisc/tree_test.go @@ -32,15 +32,15 @@ func TestParseRoot(t *testing.T) { err error }{ { - input: "enrtree-root=v1 e=TO4Q75OQ2N7DX4EOOR7X66A6OM seq=3 sig=N-YY6UB9xD0hFx1Gmnt7v0RfSxch5tKyry2SRDoLx7B4GfPXagwLxQqyf7gAMvApFn_ORwZQekMWa_pXrcGCtw=", + input: "enrtree-root:v1 e=TO4Q75OQ2N7DX4EOOR7X66A6OM seq=3 sig=N-YY6UB9xD0hFx1Gmnt7v0RfSxch5tKyry2SRDoLx7B4GfPXagwLxQqyf7gAMvApFn_ORwZQekMWa_pXrcGCtw", err: entryError{"root", errSyntax}, }, { - input: "enrtree-root=v1 e=TO4Q75OQ2N7DX4EOOR7X66A6OM l=TO4Q75OQ2N7DX4EOOR7X66A6OM seq=3 sig=N-YY6UB9xD0hFx1Gmnt7v0RfSxch5tKyry2SRDoLx7B4GfPXagwLxQqyf7gAMvApFn_ORwZQekMWa_pXrcGCtw=", + input: "enrtree-root:v1 e=TO4Q75OQ2N7DX4EOOR7X66A6OM l=TO4Q75OQ2N7DX4EOOR7X66A6OM seq=3 sig=N-YY6UB9xD0hFx1Gmnt7v0RfSxch5tKyry2SRDoLx7B4GfPXagwLxQqyf7gAMvApFn_ORwZQekMWa_pXrcGCtw", err: entryError{"root", errInvalidSig}, }, { - input: "enrtree-root=v1 e=QFT4PBCRX4XQCV3VUYJ6BTCEPU l=JGUFMSAGI7KZYB3P7IZW4S5Y3A seq=3 sig=3FmXuVwpa8Y7OstZTx9PIb1mt8FrW7VpDOFv4AaGCsZ2EIHmhraWhe4NxYhQDlw5MjeFXYMbJjsPeKlHzmJREQE=", + input: "enrtree-root:v1 e=QFT4PBCRX4XQCV3VUYJ6BTCEPU l=JGUFMSAGI7KZYB3P7IZW4S5Y3A seq=3 
sig=3FmXuVwpa8Y7OstZTx9PIb1mt8FrW7VpDOFv4AaGCsZ2EIHmhraWhe4NxYhQDlw5MjeFXYMbJjsPeKlHzmJREQE", e: rootEntry{ eroot: "QFT4PBCRX4XQCV3VUYJ6BTCEPU", lroot: "JGUFMSAGI7KZYB3P7IZW4S5Y3A", @@ -69,49 +69,49 @@ func TestParseEntry(t *testing.T) { }{ // Subtrees: { - input: "enrtree=1,2", - err: entryError{"subtree", errInvalidChild}, + input: "enrtree-branch:1,2", + err: entryError{"branch", errInvalidChild}, }, { - input: "enrtree=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - err: entryError{"subtree", errInvalidChild}, + input: "enrtree-branch:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", + err: entryError{"branch", errInvalidChild}, }, { - input: "enrtree=", - e: &subtreeEntry{}, + input: "enrtree-branch:", + e: &branchEntry{}, }, { - input: "enrtree=AAAAAAAAAAAAAAAAAAAA", - e: &subtreeEntry{[]string{"AAAAAAAAAAAAAAAAAAAA"}}, + input: "enrtree-branch:AAAAAAAAAAAAAAAAAAAA", + e: &branchEntry{[]string{"AAAAAAAAAAAAAAAAAAAA"}}, }, { - input: "enrtree=AAAAAAAAAAAAAAAAAAAA,BBBBBBBBBBBBBBBBBBBB", - e: &subtreeEntry{[]string{"AAAAAAAAAAAAAAAAAAAA", "BBBBBBBBBBBBBBBBBBBB"}}, + input: "enrtree-branch:AAAAAAAAAAAAAAAAAAAA,BBBBBBBBBBBBBBBBBBBB", + e: &branchEntry{[]string{"AAAAAAAAAAAAAAAAAAAA", "BBBBBBBBBBBBBBBBBBBB"}}, }, // Links { - input: "enrtree-link=AKPYQIUQIL7PSIACI32J7FGZW56E5FKHEFCCOFHILBIMW3M6LWXS2@nodes.example.org", + input: "enrtree://AKPYQIUQIL7PSIACI32J7FGZW56E5FKHEFCCOFHILBIMW3M6LWXS2@nodes.example.org", e: &linkEntry{"nodes.example.org", &testkey.PublicKey}, }, { - input: "enrtree-link=nodes.example.org", + input: "enrtree://nodes.example.org", err: entryError{"link", errNoPubkey}, }, { - input: "enrtree-link=AP62DT7WOTEQZGQZOU474PP3KMEGVTTE7A7NPRXKX3DUD57@nodes.example.org", + input: "enrtree://AP62DT7WOTEQZGQZOU474PP3KMEGVTTE7A7NPRXKX3DUD57@nodes.example.org", err: entryError{"link", errBadPubkey}, }, { - input: "enrtree-link=AP62DT7WONEQZGQZOU474PP3KMEGVTTE7A7NPRXKX3DUD57TQHGIA@nodes.example.org", + input: "enrtree://AP62DT7WONEQZGQZOU474PP3KMEGVTTE7A7NPRXKX3DUD57TQHGIA@nodes.example.org", err: entryError{"link", errBadPubkey}, }, // ENRs { - input: "enr=-HW4QES8QIeXTYlDzbfr1WEzE-XKY4f8gJFJzjJL-9D7TC9lJb4Z3JPRRz1lP4pL_N_QpT6rGQjAU9Apnc-C1iMP36OAgmlkgnY0iXNlY3AyNTZrMaED5IdwfMxdmR8W37HqSFdQLjDkIwBd4Q_MjxgZifgKSdM=", + input: "enr:-HW4QES8QIeXTYlDzbfr1WEzE-XKY4f8gJFJzjJL-9D7TC9lJb4Z3JPRRz1lP4pL_N_QpT6rGQjAU9Apnc-C1iMP36OAgmlkgnY0iXNlY3AyNTZrMaED5IdwfMxdmR8W37HqSFdQLjDkIwBd4Q_MjxgZifgKSdM", e: &enrEntry{node: testNode(nodesSeed1)}, }, { - input: "enr=-HW4QLZHjM4vZXkbp-5xJoHsKSbE7W39FPC8283X-y8oHcHPTnDDlIlzL5ArvDUlHZVDPgmFASrh7cWgLOLxj4wprRkHgmlkgnY0iXNlY3AyNTZrMaEC3t2jLMhDpCDX5mbSEwDn4L3iUfyXzoO8G28XvjGRkrAg=", + input: "enr:-HW4QLZHjM4vZXkbp-5xJoHsKSbE7W39FPC8283X-y8oHcHPTnDDlIlzL5ArvDUlHZVDPgmFASrh7cWgLOLxj4wprRkHgmlkgnY0iXNlY3AyNTZrMaEC3t2jLMhDpCDX5mbSEwDn4L3iUfyXzoO8G28XvjGRkrAg=", err: entryError{"enr", errInvalidENR}, }, // Invalid: diff --git a/p2p/enode/iter.go b/p2p/enode/iter.go new file mode 100644 index 000000000..112b76d06 --- /dev/null +++ b/p2p/enode/iter.go @@ -0,0 +1,286 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package enode + +import ( + "sync" + "time" +) + +// Iterator represents a sequence of nodes. The Next method moves to the next node in the +// sequence. It returns false when the sequence has ended or the iterator is closed. Close +// may be called concurrently with Next and Node, and interrupts Next if it is blocked. +type Iterator interface { + Next() bool // moves to next node + Node() *Node // returns current node + Close() // ends the iterator +} + +// ReadNodes reads at most n nodes from the given iterator. The return value contains no +// duplicates and no nil values. To prevent looping indefinitely for small repeating node +// sequences, this function calls Next at most n times. +func ReadNodes(it Iterator, n int) []*Node { + seen := make(map[ID]*Node, n) + for i := 0; i < n && it.Next(); i++ { + // Remove duplicates, keeping the node with higher seq. + node := it.Node() + prevNode, ok := seen[node.ID()] + if ok && prevNode.Seq() > node.Seq() { + continue + } + seen[node.ID()] = node + } + result := make([]*Node, 0, len(seen)) + for _, node := range seen { + result = append(result, node) + } + return result +} + +// IterNodes makes an iterator which runs through the given nodes once. +func IterNodes(nodes []*Node) Iterator { + return &sliceIter{nodes: nodes, index: -1} +} + +// CycleNodes makes an iterator which cycles through the given nodes indefinitely. +func CycleNodes(nodes []*Node) Iterator { + return &sliceIter{nodes: nodes, index: -1, cycle: true} +} + +type sliceIter struct { + mu sync.Mutex + nodes []*Node + index int + cycle bool +} + +func (it *sliceIter) Next() bool { + it.mu.Lock() + defer it.mu.Unlock() + + if len(it.nodes) == 0 { + return false + } + it.index++ + if it.index == len(it.nodes) { + if it.cycle { + it.index = 0 + } else { + it.nodes = nil + return false + } + } + return true +} + +func (it *sliceIter) Node() *Node { + if len(it.nodes) == 0 { + return nil + } + return it.nodes[it.index] +} + +func (it *sliceIter) Close() { + it.mu.Lock() + defer it.mu.Unlock() + + it.nodes = nil +} + +// Filter wraps an iterator such that Next only returns nodes for which +// the 'check' function returns true. +func Filter(it Iterator, check func(*Node) bool) Iterator { + return &filterIter{it, check} +} + +type filterIter struct { + Iterator + check func(*Node) bool +} + +func (f *filterIter) Next() bool { + for f.Iterator.Next() { + if f.check(f.Node()) { + return true + } + } + return false +} + +// FairMix aggregates multiple node iterators. The mixer itself is an iterator which ends +// only when Close is called. Source iterators added via AddSource are removed from the +// mix when they end. +// +// The distribution of nodes returned by Next is approximately fair, i.e. FairMix +// attempts to draw from all sources equally often. However, if a certain source is slow +// and doesn't return a node within the configured timeout, a node from any other source +// will be returned. +// +// It's safe to call AddSource and Close concurrently with Next. 
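Before the mixer itself, the plain combinators introduced above compose directly. A minimal sketch, assuming an arbitrary node slice and this fork's import path; the 50-sequence cutoff is made up for illustration:

```go
package example

import (
	"fmt"

	"github.com/ethersocial/go-ethersocial/p2p/enode"
)

// iteratorDemo runs the basic combinators over a static slice of nodes.
func iteratorDemo(nodes []*enode.Node) {
	// One-shot iterator over the slice, keeping only high-seq records.
	it := enode.Filter(enode.IterNodes(nodes), func(n *enode.Node) bool {
		return n.Seq() >= 50
	})
	defer it.Close()

	// ReadNodes calls Next at most 10 times and de-duplicates by node ID,
	// so even a short cycling source cannot make it loop forever.
	for _, n := range enode.ReadNodes(it, 10) {
		fmt.Println(n.ID())
	}

	// CycleNodes repeats the slice until Close is called.
	cyc := enode.CycleNodes(nodes)
	defer cyc.Close()
	if cyc.Next() {
		fmt.Println("first node again:", cyc.Node().ID())
	}
}
```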
+type FairMix struct { + wg sync.WaitGroup + fromAny chan *Node + timeout time.Duration + cur *Node + + mu sync.Mutex + closed chan struct{} + sources []*mixSource + last int +} + +type mixSource struct { + it Iterator + next chan *Node + timeout time.Duration +} + +// NewFairMix creates a mixer. +// +// The timeout specifies how long the mixer will wait for the next fairly-chosen source +// before giving up and taking a node from any other source. A good way to set the timeout +// is deciding how long you'd want to wait for a node on average. Passing a negative +// timeout makes the mixer completely fair. +func NewFairMix(timeout time.Duration) *FairMix { + m := &FairMix{ + fromAny: make(chan *Node), + closed: make(chan struct{}), + timeout: timeout, + } + return m +} + +// AddSource adds a source of nodes. +func (m *FairMix) AddSource(it Iterator) { + m.mu.Lock() + defer m.mu.Unlock() + + if m.closed == nil { + return + } + m.wg.Add(1) + source := &mixSource{it, make(chan *Node), m.timeout} + m.sources = append(m.sources, source) + go m.runSource(m.closed, source) +} + +// Close shuts down the mixer and all current sources. +// Calling this is required to release resources associated with the mixer. +func (m *FairMix) Close() { + m.mu.Lock() + defer m.mu.Unlock() + + if m.closed == nil { + return + } + for _, s := range m.sources { + s.it.Close() + } + close(m.closed) + m.wg.Wait() + close(m.fromAny) + m.sources = nil + m.closed = nil +} + +// Next returns a node from a random source. +func (m *FairMix) Next() bool { + m.cur = nil + + var timeout <-chan time.Time + if m.timeout >= 0 { + timer := time.NewTimer(m.timeout) + timeout = timer.C + defer timer.Stop() + } + for { + source := m.pickSource() + if source == nil { + return m.nextFromAny() + } + select { + case n, ok := <-source.next: + if ok { + m.cur = n + source.timeout = m.timeout + return true + } + // This source has ended. + m.deleteSource(source) + case <-timeout: + source.timeout /= 2 + return m.nextFromAny() + } + } +} + +// Node returns the current node. +func (m *FairMix) Node() *Node { + return m.cur +} + +// nextFromAny is used when there are no sources or when the 'fair' choice +// doesn't turn up a node quickly enough. +func (m *FairMix) nextFromAny() bool { + n, ok := <-m.fromAny + if ok { + m.cur = n + } + return ok +} + +// pickSource chooses the next source to read from, cycling through them in order. +func (m *FairMix) pickSource() *mixSource { + m.mu.Lock() + defer m.mu.Unlock() + + if len(m.sources) == 0 { + return nil + } + m.last = (m.last + 1) % len(m.sources) + return m.sources[m.last] +} + +// deleteSource deletes a source. +func (m *FairMix) deleteSource(s *mixSource) { + m.mu.Lock() + defer m.mu.Unlock() + + for i := range m.sources { + if m.sources[i] == s { + copy(m.sources[i:], m.sources[i+1:]) + m.sources[len(m.sources)-1] = nil + m.sources = m.sources[:len(m.sources)-1] + break + } + } +} + +// runSource reads a single source in a loop. +func (m *FairMix) runSource(closed chan struct{}, s *mixSource) { + defer m.wg.Done() + defer close(s.next) + for s.it.Next() { + n := s.it.Node() + select { + case s.next <- n: + case m.fromAny <- n: + case <-closed: + return + } + } +} diff --git a/p2p/enode/iter_test.go b/p2p/enode/iter_test.go new file mode 100644 index 000000000..d11d419f6 --- /dev/null +++ b/p2p/enode/iter_test.go @@ -0,0 +1,291 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package enode + +import ( + "encoding/binary" + "runtime" + "sync/atomic" + "testing" + "time" + + "github.com/ethersocial/go-ethersocial/p2p/enr" +) + +func TestReadNodes(t *testing.T) { + nodes := ReadNodes(new(genIter), 10) + checkNodes(t, nodes, 10) +} + +// This test checks that ReadNodes terminates when reading N nodes from an iterator +// which returns less than N nodes in an endless cycle. +func TestReadNodesCycle(t *testing.T) { + iter := &callCountIter{ + Iterator: CycleNodes([]*Node{ + testNode(0, 0), + testNode(1, 0), + testNode(2, 0), + }), + } + nodes := ReadNodes(iter, 10) + checkNodes(t, nodes, 3) + if iter.count != 10 { + t.Fatalf("%d calls to Next, want %d", iter.count, 100) + } +} + +func TestFilterNodes(t *testing.T) { + nodes := make([]*Node, 100) + for i := range nodes { + nodes[i] = testNode(uint64(i), uint64(i)) + } + + it := Filter(IterNodes(nodes), func(n *Node) bool { + return n.Seq() >= 50 + }) + for i := 50; i < len(nodes); i++ { + if !it.Next() { + t.Fatal("Next returned false") + } + if it.Node() != nodes[i] { + t.Fatalf("iterator returned wrong node %v\nwant %v", it.Node(), nodes[i]) + } + } + if it.Next() { + t.Fatal("Next returned true after underlying iterator has ended") + } +} + +func checkNodes(t *testing.T, nodes []*Node, wantLen int) { + if len(nodes) != wantLen { + t.Errorf("slice has %d nodes, want %d", len(nodes), wantLen) + return + } + seen := make(map[ID]bool) + for i, e := range nodes { + if e == nil { + t.Errorf("nil node at index %d", i) + return + } + if seen[e.ID()] { + t.Errorf("slice has duplicate node %v", e.ID()) + return + } + seen[e.ID()] = true + } +} + +// This test checks fairness of FairMix in the happy case where all sources return nodes +// within the context's deadline. +func TestFairMix(t *testing.T) { + for i := 0; i < 500; i++ { + testMixerFairness(t) + } +} + +func testMixerFairness(t *testing.T) { + mix := NewFairMix(1 * time.Second) + mix.AddSource(&genIter{index: 1}) + mix.AddSource(&genIter{index: 2}) + mix.AddSource(&genIter{index: 3}) + defer mix.Close() + + nodes := ReadNodes(mix, 500) + checkNodes(t, nodes, 500) + + // Verify that the nodes slice contains an approximately equal number of nodes + // from each source. + d := idPrefixDistribution(nodes) + for _, count := range d { + if approxEqual(count, len(nodes)/3, 30) { + t.Fatalf("ID distribution is unfair: %v", d) + } + } +} + +// This test checks that FairMix falls back to an alternative source when +// the 'fair' choice doesn't return a node within the timeout. 
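Typical FairMix wiring mirrors what these tests exercise. A sketch assuming two arbitrary sources (a DHT walk, a static list, a DNS tree, etc.); the helper name is illustrative:

```go
package example

import (
	"fmt"
	"time"

	"github.com/ethersocial/go-ethersocial/p2p/enode"
)

// mixSources interleaves candidates from two sources, waiting at most one
// second for the fairly-chosen source before falling back to any other.
func mixSources(src1, src2 enode.Iterator) {
	mix := enode.NewFairMix(1 * time.Second)
	defer mix.Close()
	mix.AddSource(src1)
	mix.AddSource(src2)

	// FairMix is itself an Iterator, so it can be drained with ReadNodes.
	for _, n := range enode.ReadNodes(mix, 32) {
		fmt.Println("candidate:", n.URLv4())
	}
}
```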
+func TestFairMixNextFromAll(t *testing.T) { + mix := NewFairMix(1 * time.Millisecond) + mix.AddSource(&genIter{index: 1}) + mix.AddSource(CycleNodes(nil)) + defer mix.Close() + + nodes := ReadNodes(mix, 500) + checkNodes(t, nodes, 500) + + d := idPrefixDistribution(nodes) + if len(d) > 1 || d[1] != len(nodes) { + t.Fatalf("wrong ID distribution: %v", d) + } +} + +// This test ensures FairMix works for Next with no sources. +func TestFairMixEmpty(t *testing.T) { + var ( + mix = NewFairMix(1 * time.Second) + testN = testNode(1, 1) + ch = make(chan *Node) + ) + defer mix.Close() + + go func() { + mix.Next() + ch <- mix.Node() + }() + + mix.AddSource(CycleNodes([]*Node{testN})) + if n := <-ch; n != testN { + t.Errorf("got wrong node: %v", n) + } +} + +// This test checks closing a source while Next runs. +func TestFairMixRemoveSource(t *testing.T) { + mix := NewFairMix(1 * time.Second) + source := make(blockingIter) + mix.AddSource(source) + + sig := make(chan *Node) + go func() { + <-sig + mix.Next() + sig <- mix.Node() + }() + + sig <- nil + runtime.Gosched() + source.Close() + + wantNode := testNode(0, 0) + mix.AddSource(CycleNodes([]*Node{wantNode})) + n := <-sig + + if len(mix.sources) != 1 { + t.Fatalf("have %d sources, want one", len(mix.sources)) + } + if n != wantNode { + t.Fatalf("mixer returned wrong node") + } +} + +type blockingIter chan struct{} + +func (it blockingIter) Next() bool { + <-it + return false +} + +func (it blockingIter) Node() *Node { + return nil +} + +func (it blockingIter) Close() { + close(it) +} + +func TestFairMixClose(t *testing.T) { + for i := 0; i < 20 && !t.Failed(); i++ { + testMixerClose(t) + } +} + +func testMixerClose(t *testing.T) { + mix := NewFairMix(-1) + mix.AddSource(CycleNodes(nil)) + mix.AddSource(CycleNodes(nil)) + + done := make(chan struct{}) + go func() { + defer close(done) + if mix.Next() { + t.Error("Next returned true") + } + }() + // This call is supposed to make it more likely that NextNode is + // actually executing by the time we call Close. + runtime.Gosched() + + mix.Close() + select { + case <-done: + case <-time.After(3 * time.Second): + t.Fatal("Next didn't unblock on Close") + } + + mix.Close() // shouldn't crash +} + +func idPrefixDistribution(nodes []*Node) map[uint32]int { + d := make(map[uint32]int) + for _, node := range nodes { + id := node.ID() + d[binary.BigEndian.Uint32(id[:4])]++ + } + return d +} + +func approxEqual(x, y, ε int) bool { + if y > x { + x, y = y, x + } + return x-y > ε +} + +// genIter creates fake nodes with numbered IDs based on 'index' and 'gen' +type genIter struct { + node *Node + index, gen uint32 +} + +func (s *genIter) Next() bool { + index := atomic.LoadUint32(&s.index) + if index == ^uint32(0) { + s.node = nil + return false + } + s.node = testNode(uint64(index)<<32|uint64(s.gen), 0) + s.gen++ + return true +} + +func (s *genIter) Node() *Node { + return s.node +} + +func (s *genIter) Close() { + s.index = ^uint32(0) +} + +func testNode(id, seq uint64) *Node { + var nodeID ID + binary.BigEndian.PutUint64(nodeID[:], id) + r := new(enr.Record) + r.SetSeq(seq) + return SignNull(r, nodeID) +} + +// callCountIter counts calls to NextNode. 
+type callCountIter struct { + Iterator + count int +} + +func (it *callCountIter) Next() bool { + it.count++ + return it.Iterator.Next() +} diff --git a/p2p/protocol.go b/p2p/protocol.go index 397b29953..dcf8834e7 100644 --- a/p2p/protocol.go +++ b/p2p/protocol.go @@ -54,6 +54,11 @@ type Protocol struct { // but returns nil, it is assumed that the protocol handshake is still running. PeerInfo func(id enode.ID) interface{} + // DialCandidates, if non-nil, is a way to tell Server about protocol-specific nodes + // that should be dialed. The server continuously reads nodes from the iterator and + // attempts to create connections to them. + DialCandidates enode.Iterator + // Attributes contains protocol specific information for the node record. Attributes []enr.Entry } diff --git a/p2p/server.go b/p2p/server.go index 5cb89dd83..ee15c87e9 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -45,6 +45,11 @@ import ( const ( defaultDialTimeout = 15 * time.Second + // This is the fairness knob for the discovery mixer. When looking for peers, we'll + // wait this long for a single source of candidates before moving on and trying other + // sources. + discmixTimeout = 5 * time.Second + // Connectivity defaults. maxActiveDialTasks = 16 defaultMaxPendingPeers = 50 @@ -167,16 +172,20 @@ type Server struct { lock sync.Mutex // protects running running bool - nodedb *enode.DB - localnode *enode.LocalNode - ntab discoverTable listener net.Listener ourHandshake *protoHandshake - DiscV5 *discv5.Network loopWG sync.WaitGroup // loop, listenLoop peerFeed event.Feed log log.Logger + nodedb *enode.DB + localnode *enode.LocalNode + ntab *discover.UDPv4 + DiscV5 *discv5.Network + discmix *enode.FairMix + + staticNodeResolver nodeResolver + // Channels into the run loop. quit chan struct{} addstatic chan *enode.Node @@ -470,7 +479,7 @@ func (srv *Server) Start() (err error) { } dynPeers := srv.maxDialedConns() - dialer := newDialState(srv.localnode.ID(), srv.ntab, dynPeers, &srv.Config) + dialer := newDialState(srv.localnode.ID(), dynPeers, &srv.Config) srv.loopWG.Add(1) go srv.run(dialer) return nil @@ -521,6 +530,18 @@ func (srv *Server) setupLocalNode() error { } func (srv *Server) setupDiscovery() error { + srv.discmix = enode.NewFairMix(discmixTimeout) + + // Add protocol-specific discovery sources. + added := make(map[string]bool) + for _, proto := range srv.Protocols { + if proto.DialCandidates != nil && !added[proto.Name] { + srv.discmix.AddSource(proto.DialCandidates) + added[proto.Name] = true + } + } + + // Don't listen on UDP endpoint if DHT is disabled. 
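From a protocol's point of view, DialCandidates is just another enode.Iterator that the server drains and mixes with discovery results. A hedged sketch of a protocol definition feeding static candidates; the protocol skeleton and names are illustrative only:

```go
package example

import (
	"github.com/ethersocial/go-ethersocial/p2p"
	"github.com/ethersocial/go-ethersocial/p2p/enode"
)

// demoProtocol returns a protocol whose dial candidates come from a fixed
// node list; any Iterator (FairMix, DNS discovery, ...) could be used instead.
func demoProtocol(candidates []*enode.Node) p2p.Protocol {
	return p2p.Protocol{
		Name:    "demo",
		Version: 1,
		Length:  1,
		Run: func(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
			return nil // real protocol logic elided
		},
		// The server keeps reading from this iterator and dials what it yields.
		DialCandidates: enode.CycleNodes(candidates),
	}
}
```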
if srv.NoDiscovery && !srv.DiscoveryV5 { return nil } @@ -562,7 +583,10 @@ func (srv *Server) setupDiscovery() error { return err } srv.ntab = ntab + srv.discmix.AddSource(ntab.RandomNodes()) + srv.staticNodeResolver = ntab } + // Discovery V5 if srv.DiscoveryV5 { var ntab *discv5.Network @@ -620,6 +644,7 @@ func (srv *Server) run(dialstate dialer) { srv.log.Info("Started P2P networking", "self", srv.localnode.Node().URLv4()) defer srv.loopWG.Done() defer srv.nodedb.Close() + defer srv.discmix.Close() var ( peers = make(map[enode.ID]*Peer) diff --git a/p2p/server_test.go b/p2p/server_test.go index 77c519e49..f470c780e 100644 --- a/p2p/server_test.go +++ b/p2p/server_test.go @@ -233,8 +233,8 @@ func TestServerTaskScheduling(t *testing.T) { Config: Config{MaxPeers: 10}, localnode: enode.NewLocalNode(db, newkey()), nodedb: db, + discmix: enode.NewFairMix(0), quit: make(chan struct{}), - ntab: fakeTable{}, running: true, log: log.New(), } @@ -282,9 +282,9 @@ func TestServerManyTasks(t *testing.T) { quit: make(chan struct{}), localnode: enode.NewLocalNode(db, newkey()), nodedb: db, - ntab: fakeTable{}, running: true, log: log.New(), + discmix: enode.NewFairMix(0), } done = make(chan *testTask) start, end = 0, 0 diff --git a/p2p/simulations/adapters/types.go b/p2p/simulations/adapters/types.go index 3b1ecf849..6b5048746 100644 --- a/p2p/simulations/adapters/types.go +++ b/p2p/simulations/adapters/types.go @@ -101,6 +101,11 @@ type NodeConfig struct { // services registered by calling the RegisterService function) Services []string + // Properties are the names of the properties this node should hold + // within running services (e.g. "bootnode", "lightnode" or any custom values) + // These values need to be checked and acted upon by node Services + Properties []string + // Enode node *enode.Node @@ -120,6 +125,7 @@ type nodeConfigJSON struct { PrivateKey string `json:"private_key"` Name string `json:"name"` Services []string `json:"services"` + Properties []string `json:"properties"` EnableMsgEvents bool `json:"enable_msg_events"` Port uint16 `json:"port"` } @@ -131,6 +137,7 @@ func (n *NodeConfig) MarshalJSON() ([]byte, error) { ID: n.ID.String(), Name: n.Name, Services: n.Services, + Properties: n.Properties, Port: n.Port, EnableMsgEvents: n.EnableMsgEvents, } @@ -168,6 +175,7 @@ func (n *NodeConfig) UnmarshalJSON(data []byte) error { n.Name = confJSON.Name n.Services = confJSON.Services + n.Properties = confJSON.Properties n.Port = confJSON.Port n.EnableMsgEvents = confJSON.EnableMsgEvents diff --git a/p2p/simulations/network.go b/p2p/simulations/network.go index 85e2987de..e6f582e53 100644 --- a/p2p/simulations/network.go +++ b/p2p/simulations/network.go @@ -56,6 +56,9 @@ type Network struct { Nodes []*Node `json:"nodes"` nodeMap map[enode.ID]int + // Maps a node property string to node indexes of all nodes that hold this property + propertyMap map[string][]int + Conns []*Conn `json:"conns"` connMap map[string]int @@ -71,6 +74,7 @@ func NewNetwork(nodeAdapter adapters.NodeAdapter, conf *NetworkConfig) *Network NetworkConfig: *conf, nodeAdapter: nodeAdapter, nodeMap: make(map[enode.ID]int), + propertyMap: make(map[string][]int), connMap: make(map[string]int), quitc: make(chan struct{}), } @@ -120,9 +124,16 @@ func (net *Network) NewNodeWithConfig(conf *adapters.NodeConfig) (*Node, error) Config: conf, } log.Trace("Node created", "id", conf.ID) - net.nodeMap[conf.ID] = len(net.Nodes) + + nodeIndex := len(net.Nodes) + net.nodeMap[conf.ID] = nodeIndex net.Nodes = append(net.Nodes, node) + // 
Register any node properties with the network-level propertyMap + for _, property := range conf.Properties { + net.propertyMap[property] = append(net.propertyMap[property], nodeIndex) + } + // emit a "control" event net.events.Send(ControlEvent(node)) @@ -410,7 +421,7 @@ func (net *Network) getNode(id enode.ID) *Node { return net.Nodes[i] } -// GetNode gets the node with the given name, returning nil if the node does +// GetNodeByName gets the node with the given name, returning nil if the node does // not exist func (net *Network) GetNodeByName(name string) *Node { net.lock.RLock() @@ -427,19 +438,104 @@ func (net *Network) getNodeByName(name string) *Node { return nil } -// GetNodes returns the existing nodes -func (net *Network) GetNodes() (nodes []*Node) { +// GetNodeIDs returns the IDs of all existing nodes +// Nodes can optionally be excluded by specifying their enode.ID. +func (net *Network) GetNodeIDs(excludeIDs ...enode.ID) []enode.ID { + net.lock.RLock() + defer net.lock.RUnlock() + + return net.getNodeIDs(excludeIDs) +} + +func (net *Network) getNodeIDs(excludeIDs []enode.ID) []enode.ID { + // Get all curent nodeIDs + nodeIDs := make([]enode.ID, 0, len(net.nodeMap)) + for id := range net.nodeMap { + nodeIDs = append(nodeIDs, id) + } + + if len(excludeIDs) > 0 { + // Return the difference of nodeIDs and excludeIDs + return filterIDs(nodeIDs, excludeIDs) + } else { + return nodeIDs + } +} + +// GetNodes returns the existing nodes. +// Nodes can optionally be excluded by specifying their enode.ID. +func (net *Network) GetNodes(excludeIDs ...enode.ID) []*Node { + net.lock.RLock() + defer net.lock.RUnlock() + + return net.getNodes(excludeIDs) +} + +func (net *Network) getNodes(excludeIDs []enode.ID) []*Node { + if len(excludeIDs) > 0 { + nodeIDs := net.getNodeIDs(excludeIDs) + return net.getNodesByID(nodeIDs) + } else { + return net.Nodes + } +} + +// GetNodesByID returns existing nodes with the given enode.IDs. +// If a node doesn't exist with a given enode.ID, it is ignored. +func (net *Network) GetNodesByID(nodeIDs []enode.ID) []*Node { + net.lock.RLock() + defer net.lock.RUnlock() + + return net.getNodesByID(nodeIDs) +} + +func (net *Network) getNodesByID(nodeIDs []enode.ID) []*Node { + nodes := make([]*Node, 0, len(nodeIDs)) + for _, id := range nodeIDs { + node := net.getNode(id) + if node != nil { + nodes = append(nodes, node) + } + } + + return nodes +} + +// GetNodesByProperty returns existing nodes that have the given property string registered in their NodeConfig +func (net *Network) GetNodesByProperty(property string) []*Node { net.lock.RLock() defer net.lock.RUnlock() - return net.getNodes() + return net.getNodesByProperty(property) } -func (net *Network) getNodes() (nodes []*Node) { - nodes = append(nodes, net.Nodes...) 
+func (net *Network) getNodesByProperty(property string) []*Node { + nodes := make([]*Node, 0, len(net.propertyMap[property])) + for _, nodeIndex := range net.propertyMap[property] { + nodes = append(nodes, net.Nodes[nodeIndex]) + } + return nodes } +// GetNodeIDsByProperty returns existing node's enode IDs that have the given property string registered in the NodeConfig +func (net *Network) GetNodeIDsByProperty(property string) []enode.ID { + net.lock.RLock() + defer net.lock.RUnlock() + + return net.getNodeIDsByProperty(property) +} + +func (net *Network) getNodeIDsByProperty(property string) []enode.ID { + nodeIDs := make([]enode.ID, 0, len(net.propertyMap[property])) + for _, nodeIndex := range net.propertyMap[property] { + node := net.Nodes[nodeIndex] + nodeIDs = append(nodeIDs, node.ID()) + } + + return nodeIDs +} + // GetRandomUpNode returns a random node on the network, which is running. func (net *Network) GetRandomUpNode(excludeIDs ...enode.ID) *Node { net.lock.RLock() @@ -469,7 +565,7 @@ func (net *Network) GetRandomDownNode(excludeIDs ...enode.ID) *Node { } func (net *Network) getDownNodeIDs() (ids []enode.ID) { - for _, node := range net.getNodes() { + for _, node := range net.Nodes { if !node.Up() { ids = append(ids, node.ID()) } @@ -477,6 +573,13 @@ func (net *Network) getDownNodeIDs() (ids []enode.ID) { return ids } +// GetRandomNode returns a random node on the network, regardless of whether it is running or not +func (net *Network) GetRandomNode(excludeIDs ...enode.ID) *Node { + net.lock.RLock() + defer net.lock.RUnlock() + return net.getRandomNode(net.getNodeIDs(nil), excludeIDs) // no need to exclude twice +} + func (net *Network) getRandomNode(ids []enode.ID, excludeIDs []enode.ID) *Node { filtered := filterIDs(ids, excludeIDs) @@ -616,6 +719,7 @@ func (net *Network) Reset() { //re-initialize the maps net.connMap = make(map[string]int) net.nodeMap = make(map[enode.ID]int) + net.propertyMap = make(map[string][]int) net.Nodes = nil net.Conns = nil @@ -634,12 +738,14 @@ type Node struct { upMu sync.RWMutex } +// Up returns whether the node is currently up (online) func (n *Node) Up() bool { n.upMu.RLock() defer n.upMu.RUnlock() return n.up } +// SetUp sets the up (online) status of the nodes with the given value func (n *Node) SetUp(up bool) { n.upMu.Lock() defer n.upMu.Unlock() diff --git a/p2p/simulations/network_test.go b/p2p/simulations/network_test.go index 3166c6637..292c4bbd8 100644 --- a/p2p/simulations/network_test.go +++ b/p2p/simulations/network_test.go @@ -17,6 +17,7 @@ package simulations import ( + "bytes" "context" "encoding/json" "fmt" @@ -393,6 +394,275 @@ func TestNetworkSimulation(t *testing.T) { } } +func createTestNodes(count int, network *Network) (nodes []*Node, err error) { + for i := 0; i < count; i++ { + nodeConf := adapters.RandomNodeConfig() + node, err := network.NewNodeWithConfig(nodeConf) + if err != nil { + return nil, err + } + if err := network.Start(node.ID()); err != nil { + return nil, err + } + + nodes = append(nodes, node) + } + + return nodes, nil +} + +func createTestNodesWithProperty(property string, count int, network *Network) (propertyNodes []*Node, err error) { + for i := 0; i < count; i++ { + nodeConf := adapters.RandomNodeConfig() + nodeConf.Properties = append(nodeConf.Properties, property) + + node, err := network.NewNodeWithConfig(nodeConf) + if err != nil { + return nil, err + } + if err := network.Start(node.ID()); err != nil { + return nil, err + } + + propertyNodes = append(propertyNodes, node) + } + + return 
propertyNodes, nil +} + +// TestGetNodeIDs creates a set of nodes and attempts to retrieve their IDs,. +// It then tests again whilst excluding a node ID from being returned. +// If a node ID is not returned, or more node IDs than expected are returned, the test fails. +func TestGetNodeIDs(t *testing.T) { + adapter := adapters.NewSimAdapter(adapters.Services{ + "test": newTestService, + }) + network := NewNetwork(adapter, &NetworkConfig{ + DefaultService: "test", + }) + defer network.Shutdown() + + numNodes := 5 + nodes, err := createTestNodes(numNodes, network) + if err != nil { + t.Fatalf("Could not creat test nodes %v", err) + } + + gotNodeIDs := network.GetNodeIDs() + if len(gotNodeIDs) != numNodes { + t.Fatalf("Expected %d nodes, got %d", numNodes, len(gotNodeIDs)) + } + + for _, node1 := range nodes { + match := false + for _, node2ID := range gotNodeIDs { + if bytes.Equal(node1.ID().Bytes(), node2ID.Bytes()) { + match = true + break + } + } + + if !match { + t.Fatalf("A created node was not returned by GetNodes(), ID: %s", node1.ID().String()) + } + } + + excludeNodeID := nodes[3].ID() + gotNodeIDsExcl := network.GetNodeIDs(excludeNodeID) + if len(gotNodeIDsExcl) != numNodes-1 { + t.Fatalf("Expected one less node ID to be returned") + } + for _, nodeID := range gotNodeIDsExcl { + if bytes.Equal(excludeNodeID.Bytes(), nodeID.Bytes()) { + t.Fatalf("GetNodeIDs returned the node ID we excluded, ID: %s", nodeID.String()) + } + } +} + +// TestGetNodes creates a set of nodes and attempts to retrieve them again. +// It then tests again whilst excluding a node from being returned. +// If a node is not returned, or more nodes than expected are returned, the test fails. +func TestGetNodes(t *testing.T) { + adapter := adapters.NewSimAdapter(adapters.Services{ + "test": newTestService, + }) + network := NewNetwork(adapter, &NetworkConfig{ + DefaultService: "test", + }) + defer network.Shutdown() + + numNodes := 5 + nodes, err := createTestNodes(numNodes, network) + if err != nil { + t.Fatalf("Could not creat test nodes %v", err) + } + + gotNodes := network.GetNodes() + if len(gotNodes) != numNodes { + t.Fatalf("Expected %d nodes, got %d", numNodes, len(gotNodes)) + } + + for _, node1 := range nodes { + match := false + for _, node2 := range gotNodes { + if bytes.Equal(node1.ID().Bytes(), node2.ID().Bytes()) { + match = true + break + } + } + + if !match { + t.Fatalf("A created node was not returned by GetNodes(), ID: %s", node1.ID().String()) + } + } + + excludeNodeID := nodes[3].ID() + gotNodesExcl := network.GetNodes(excludeNodeID) + if len(gotNodesExcl) != numNodes-1 { + t.Fatalf("Expected one less node to be returned") + } + for _, node := range gotNodesExcl { + if bytes.Equal(excludeNodeID.Bytes(), node.ID().Bytes()) { + t.Fatalf("GetNodes returned the node we excluded, ID: %s", node.ID().String()) + } + } +} + +// TestGetNodesByID creates a set of nodes and attempts to retrieve a subset of them by ID +// If a node is not returned, or more nodes than expected are returned, the test fails. 
+func TestGetNodesByID(t *testing.T) { + adapter := adapters.NewSimAdapter(adapters.Services{ + "test": newTestService, + }) + network := NewNetwork(adapter, &NetworkConfig{ + DefaultService: "test", + }) + defer network.Shutdown() + + numNodes := 5 + nodes, err := createTestNodes(numNodes, network) + if err != nil { + t.Fatalf("Could not create test nodes: %v", err) + } + + numSubsetNodes := 2 + subsetNodes := nodes[0:numSubsetNodes] + var subsetNodeIDs []enode.ID + for _, node := range subsetNodes { + subsetNodeIDs = append(subsetNodeIDs, node.ID()) + } + + gotNodesByID := network.GetNodesByID(subsetNodeIDs) + if len(gotNodesByID) != numSubsetNodes { + t.Fatalf("Expected %d nodes, got %d", numSubsetNodes, len(gotNodesByID)) + } + + for _, node1 := range subsetNodes { + match := false + for _, node2 := range gotNodesByID { + if bytes.Equal(node1.ID().Bytes(), node2.ID().Bytes()) { + match = true + break + } + } + + if !match { + t.Fatalf("A created node was not returned by GetNodesByID(), ID: %s", node1.ID().String()) + } + } +} + +// TestGetNodesByProperty creates a subset of nodes with a property assigned. +// GetNodesByProperty is then checked for correctness by comparing the nodes returned to those initially created. +// If a node with a property is not found, or more nodes than expected are returned, the test fails. +func TestGetNodesByProperty(t *testing.T) { + adapter := adapters.NewSimAdapter(adapters.Services{ + "test": newTestService, + }) + network := NewNetwork(adapter, &NetworkConfig{ + DefaultService: "test", + }) + defer network.Shutdown() + + numNodes := 3 + _, err := createTestNodes(numNodes, network) + if err != nil { + t.Fatalf("Failed to create nodes: %v", err) + } + + numPropertyNodes := 3 + propertyTest := "test" + propertyNodes, err := createTestNodesWithProperty(propertyTest, numPropertyNodes, network) + if err != nil { + t.Fatalf("Failed to create nodes with property: %v", err) + } + + gotNodesByProperty := network.GetNodesByProperty(propertyTest) + if len(gotNodesByProperty) != numPropertyNodes { + t.Fatalf("Expected %d nodes with a property, got %d", numPropertyNodes, len(gotNodesByProperty)) + } + + for _, node1 := range propertyNodes { + match := false + for _, node2 := range gotNodesByProperty { + if bytes.Equal(node1.ID().Bytes(), node2.ID().Bytes()) { + match = true + break + } + } + + if !match { + t.Fatalf("A created node with property was not returned by GetNodesByProperty(), ID: %s", node1.ID().String()) + } + } +} + +// TestGetNodeIDsByProperty creates a subset of nodes with a property assigned. +// GetNodeIDsByProperty is then checked for correctness by comparing the node IDs returned to those initially created. +// If a node ID with a property is not found, or more nodes IDs than expected are returned, the test fails. 
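Outside the tests, the property flow is simply: tag the config, start the node, query the network. A sketch assuming an existing *simulations.Network and a hypothetical "bootnode" tag:

```go
package example

import (
	"fmt"

	"github.com/ethersocial/go-ethersocial/p2p/simulations"
	"github.com/ethersocial/go-ethersocial/p2p/simulations/adapters"
)

// addBootnode tags one simulation node with the "bootnode" property and
// queries it back through the new property-based accessors.
func addBootnode(network *simulations.Network) error {
	conf := adapters.RandomNodeConfig()
	conf.Properties = append(conf.Properties, "bootnode")

	node, err := network.NewNodeWithConfig(conf)
	if err != nil {
		return err
	}
	if err := network.Start(node.ID()); err != nil {
		return err
	}

	fmt.Println("bootnodes:", len(network.GetNodesByProperty("bootnode")))
	fmt.Println("bootnode IDs:", network.GetNodeIDsByProperty("bootnode"))
	return nil
}
```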
+func TestGetNodeIDsByProperty(t *testing.T) { + adapter := adapters.NewSimAdapter(adapters.Services{ + "test": newTestService, + }) + network := NewNetwork(adapter, &NetworkConfig{ + DefaultService: "test", + }) + defer network.Shutdown() + + numNodes := 3 + _, err := createTestNodes(numNodes, network) + if err != nil { + t.Fatalf("Failed to create nodes: %v", err) + } + + numPropertyNodes := 3 + propertyTest := "test" + propertyNodes, err := createTestNodesWithProperty(propertyTest, numPropertyNodes, network) + if err != nil { + t.Fatalf("Failed to created nodes with property: %v", err) + } + + gotNodeIDsByProperty := network.GetNodeIDsByProperty(propertyTest) + if len(gotNodeIDsByProperty) != numPropertyNodes { + t.Fatalf("Expected %d nodes with a property, got %d", numPropertyNodes, len(gotNodeIDsByProperty)) + } + + for _, node1 := range propertyNodes { + match := false + id1 := node1.ID() + for _, id2 := range gotNodeIDsByProperty { + if bytes.Equal(id1.Bytes(), id2.Bytes()) { + match = true + break + } + } + + if !match { + t.Fatalf("Not all nodes IDs were returned by GetNodeIDsByProperty(), ID: %s", id1.String()) + } + } +} + func triggerChecks(ctx context.Context, ids []enode.ID, trigger chan enode.ID, interval time.Duration) { tick := time.NewTicker(interval) defer tick.Stop() diff --git a/params/config.go b/params/config.go index 355fbe8fd..76cbc4a9c 100644 --- a/params/config.go +++ b/params/config.go @@ -67,16 +67,16 @@ var ( ByzantiumBlock: big.NewInt(4370000), ConstantinopleBlock: big.NewInt(7280000), PetersburgBlock: big.NewInt(7280000), - IstanbulBlock: nil, + IstanbulBlock: big.NewInt(9069000), Ethash: new(EthashConfig), } // MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network. MainnetTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 260, - SectionHead: common.HexToHash("0x613fc3c65f2abe9d66564c2d1f7c7600cd51a90a26bd9c0fda1ad9c6739428eb"), - CHTRoot: common.HexToHash("0x2a81a659f524be86929e4d34e4da05c024a68c9f44bd1184eae303802baa121e"), - BloomRoot: common.HexToHash("0x7718ec4b9ce11365b98063dc90808a87c7c1dc14c76e418a2a64a717688a601d"), + SectionIndex: 270, + SectionHead: common.HexToHash("0xb67c33d838a60c282c2fb49b188fbbac1ef8565ffb4a1c4909b0a05885e72e40"), + CHTRoot: common.HexToHash("0x781daa4607782300da85d440df3813ba38a1262585231e35e9480726de81dbfc"), + BloomRoot: common.HexToHash("0xfd8951fa6d779cbc981df40dc31056ed1a549db529349d7dfae016f9d96cae72"), } // MainnetCheckpointOracle contains a set of configs for the main network oracle. @@ -127,10 +127,10 @@ var ( // TestnetTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network. TestnetTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 194, - SectionHead: common.HexToHash("0x34b61d0b77bbbbc7747db9a786e5ac976a83ec0c7c0238d319ec95243754cfcc"), - CHTRoot: common.HexToHash("0x6793d6efd08e5f17074f5cfe3f32cc552a7514d967d03ea253b0c1cefec68f00"), - BloomRoot: common.HexToHash("0x07570f99a7d5dcdc95c40ec9145b65ecbda0c4e61f9f99fa9eff39d91a4d8ad5"), + SectionIndex: 204, + SectionHead: common.HexToHash("0xa39168b51c3205456f30ce6a91f3590a43295b15a1c8c2ab86bb8c06b8ad1808"), + CHTRoot: common.HexToHash("0x9a3654147b79882bfc4e16fbd3421512aa7e4dfadc6c511923980e0877bdf3b4"), + BloomRoot: common.HexToHash("0xe72b979522d94fa45c1331639316da234a9bb85062d64d72e13afe1d3f5c17d5"), } // TestnetCheckpointOracle contains a set of configs for the Ropsten test network oracle. 
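With IstanbulBlock now set, activation on the main network is an ordinary block-number comparison. A sketch assuming the usual IsIstanbul helper on ChainConfig; the helper itself is not part of this patch and the config literal mirrors only the mainnet value touched here:

```go
package example

import (
	"math/big"

	"github.com/ethersocial/go-ethersocial/params"
)

// istanbulActive reports whether Istanbul rules apply at the given head:
// istanbulActive(big.NewInt(9068999)) is false, 9069000 and above is true.
func istanbulActive(head *big.Int) bool {
	cfg := &params.ChainConfig{IstanbulBlock: big.NewInt(9069000)}
	return cfg.IsIstanbul(head)
}
```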
@@ -184,10 +184,10 @@ var ( // RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network. RinkebyTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 155, - SectionHead: common.HexToHash("0x746df19e755fa6310ce3b00bb29229d590aa80002012e64ef648010b8b63db67"), - CHTRoot: common.HexToHash("0xc6a1093d82003141feb1052a96e0c95cea62d64c230f805d7835857d321c0c1a"), - BloomRoot: common.HexToHash("0x9d0fc2ea21c5850a9bceb73a82a405e9934788f56a5d20f81b0bb417497a9f92"), + SectionIndex: 163, + SectionHead: common.HexToHash("0x36e5deaa46f258bece94b05d8e10f1ef68f422fb62ed47a2b6e616aa26e84997"), + CHTRoot: common.HexToHash("0x829b9feca1c2cdf5a4cf3efac554889e438ee4df8718c2ce3e02555a02d9e9e5"), + BloomRoot: common.HexToHash("0x58c01de24fdae7c082ebbe7665f189d0aa4d90ee10e72086bf56651c63269e54"), } // RinkebyCheckpointOracle contains a set of configs for the Rinkeby test network oracle. @@ -223,10 +223,10 @@ var ( // GoerliTrustedCheckpoint contains the light client trusted checkpoint for the Görli test network. GoerliTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 39, - SectionHead: common.HexToHash("0x512b5c1533d6faa3b968f095e61ad96007fda91ea296af433d96045580d9c140"), - CHTRoot: common.HexToHash("0x1a8e8ffd2cae5de61c2d06a686944b7809064ce38109c0e3ca203b5ed363bb0e"), - BloomRoot: common.HexToHash("0x32a3fec7d590143b0ccc4f1bb9fc1d9e03a30894e625a1d44d5be60e7b932491"), + SectionIndex: 47, + SectionHead: common.HexToHash("0x00c5b54c6c9a73660501fd9273ccdb4c5bbdbe5d7b8b650e28f881ec9d2337f6"), + CHTRoot: common.HexToHash("0xef35caa155fd659f57167e7d507de2f8132cbb31f771526481211d8a977d704c"), + BloomRoot: common.HexToHash("0xbda330402f66008d52e7adc748da28535b1212a7912a21244acd2ba77ff0ff06"), } // GoerliCheckpointOracle contains a set of configs for the Goerli test network oracle. 
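The hunk that follows introduces ChainConfig.CheckConfigForkOrder. A hedged sketch of the kind of misconfiguration it rejects; the config literal is invented for illustration:

```go
package example

import (
	"fmt"
	"math/big"

	"github.com/ethersocial/go-ethersocial/params"
)

// badForkOrder schedules Istanbul while leaving Petersburg unset, which the
// new check reports as an unsupported fork ordering.
func badForkOrder() error {
	cfg := &params.ChainConfig{
		HomesteadBlock:      big.NewInt(0),
		EIP150Block:         big.NewInt(0),
		EIP155Block:         big.NewInt(0),
		EIP158Block:         big.NewInt(0),
		ByzantiumBlock:      big.NewInt(0),
		ConstantinopleBlock: big.NewInt(0),
		// PetersburgBlock deliberately left nil.
		IstanbulBlock: big.NewInt(100),
	}
	err := cfg.CheckConfigForkOrder()
	// err: "unsupported fork ordering: petersburgBlock not enabled,
	// but istanbulBlock enabled at 100"
	fmt.Println(err)
	return err
}
```

Note that the added check also special-cases chain IDs 31102 and 131102, skipping the ordering comparison for a fork scheduled at the Byzantium block, so the existing Ethersocial network configs keep validating.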
@@ -449,6 +449,50 @@ func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64) *Confi return lasterr } +// CheckConfigForkOrder checks that we don't "skip" any forks, geth isn't pluggable enough +// to guarantee that forks +func (c *ChainConfig) CheckConfigForkOrder() error { + type fork struct { + name string + block *big.Int + } + var lastFork fork + for _, cur := range []fork{ + {"homesteadBlock", c.HomesteadBlock}, + {"eip150Block", c.EIP150Block}, + {"eip155Block", c.EIP155Block}, + {"eip158Block", c.EIP158Block}, + {"byzantiumBlock", c.ByzantiumBlock}, + {"constantinopleBlock", c.ConstantinopleBlock}, + {"petersburgBlock", c.PetersburgBlock}, + {"istanbulBlock", c.IstanbulBlock}, + } { + if lastFork.name != "" { + // Next one must be higher number + if lastFork.block == nil && cur.block != nil { + return fmt.Errorf("unsupported fork ordering: %v not enabled, but %v enabled at %v", + lastFork.name, cur.name, cur.block) + } + if lastFork.block != nil && cur.block != nil { + // for Ethersocial network case + if c.ChainID != nil && (c.ChainID.Cmp(big.NewInt(31102)) == 0 || c.ChainID.Cmp(big.NewInt(131102)) == 0) { + if cur.block.Cmp(c.ByzantiumBlock) == 0 { + // ignore + continue + } + } + + if lastFork.block.Cmp(cur.block) > 0 { + return fmt.Errorf("unsupported fork ordering: %v enabled at %v, but %v enabled at %v", + lastFork.name, lastFork.block, cur.name, cur.block) + } + } + } + lastFork = cur + } + return nil +} + func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *ConfigCompatError { if isForkIncompatible(c.HomesteadBlock, newcfg.HomesteadBlock, head) { return newCompatError("Homestead fork block", c.HomesteadBlock, newcfg.HomesteadBlock) diff --git a/params/version.go b/params/version.go index ef1c8d1a5..bc2f429e3 100644 --- a/params/version.go +++ b/params/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 0 // Major version component of the current release VersionMinor = 4 // Minor version component of the current release - VersionPatch = 1 // Patch version component of the current release + VersionPatch = 2 // Patch version component of the current release VersionMeta = "unstable" // Version metadata to append to the version string ) diff --git a/trie/sync.go b/trie/sync.go index ecb11851f..dc85fd9d6 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -57,14 +57,12 @@ type SyncResult struct { // persisted data items. type syncMemBatch struct { batch map[common.Hash][]byte // In-memory membatch of recently completed items - order []common.Hash // Order of completion to prevent out-of-order data loss } // newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes. func newSyncMemBatch() *syncMemBatch { return &syncMemBatch{ batch: make(map[common.Hash][]byte), - order: make([]common.Hash, 0, 256), } } @@ -223,20 +221,18 @@ func (s *Sync) Process(results []SyncResult) (bool, int, error) { } // Commit flushes the data stored in the internal membatch out to persistent -// storage, returning the number of items written and any occurred error. -func (s *Sync) Commit(dbw ethdb.KeyValueWriter) (int, error) { +// storage, returning any occurred error. 
+func (s *Sync) Commit(dbw ethdb.Batch) error { // Dump the membatch into a database dbw - for i, key := range s.membatch.order { - if err := dbw.Put(key[:], s.membatch.batch[key]); err != nil { - return i, err + for key, value := range s.membatch.batch { + if err := dbw.Put(key[:], value); err != nil { + return err } s.bloom.Add(key[:]) } - written := len(s.membatch.order) // TODO(karalabe): could an order change improve write performance? - // Drop the membatch data and return s.membatch = newSyncMemBatch() - return written, nil + return nil } // Pending returns the number of state entries currently pending for download. @@ -330,7 +326,6 @@ func (s *Sync) children(req *request, object node) ([]*request, error) { func (s *Sync) commit(req *request) (err error) { // Write the node content to the membatch s.membatch.batch[req.hash] = req.data - s.membatch.order = append(s.membatch.order, req.hash) delete(s.requests, req.hash) diff --git a/trie/sync_test.go b/trie/sync_test.go index 769b0b237..c9d54211b 100644 --- a/trie/sync_test.go +++ b/trie/sync_test.go @@ -105,7 +105,7 @@ func TestEmptySync(t *testing.T) { func TestIterativeSyncIndividual(t *testing.T) { testIterativeSync(t, 1) } func TestIterativeSyncBatched(t *testing.T) { testIterativeSync(t, 100) } -func testIterativeSync(t *testing.T, batch int) { +func testIterativeSync(t *testing.T, count int) { // Create a random trie to copy srcDb, srcTrie, srcData := makeTestTrie() @@ -114,7 +114,7 @@ func testIterativeSync(t *testing.T, batch int) { triedb := NewDatabase(diskdb) sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb)) - queue := append([]common.Hash{}, sched.Missing(batch)...) + queue := append([]common.Hash{}, sched.Missing(count)...) for len(queue) > 0 { results := make([]SyncResult, len(queue)) for i, hash := range queue { @@ -127,10 +127,12 @@ func testIterativeSync(t *testing.T, batch int) { if _, index, err := sched.Process(results); err != nil { t.Fatalf("failed to process result #%d: %v", index, err) } - if index, err := sched.Commit(diskdb); err != nil { - t.Fatalf("failed to commit data #%d: %v", index, err) + batch := diskdb.NewBatch() + if err := sched.Commit(batch); err != nil { + t.Fatalf("failed to commit data: %v", err) } - queue = append(queue[:0], sched.Missing(batch)...) + batch.Write() + queue = append(queue[:0], sched.Missing(count)...) } // Cross check that the two tries are in sync checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData) @@ -161,9 +163,11 @@ func TestIterativeDelayedSync(t *testing.T) { if _, index, err := sched.Process(results); err != nil { t.Fatalf("failed to process result #%d: %v", index, err) } - if index, err := sched.Commit(diskdb); err != nil { - t.Fatalf("failed to commit data #%d: %v", index, err) + batch := diskdb.NewBatch() + if err := sched.Commit(batch); err != nil { + t.Fatalf("failed to commit data: %v", err) } + batch.Write() queue = append(queue[len(results):], sched.Missing(10000)...) 
} // Cross check that the two tries are in sync @@ -176,7 +180,7 @@ func TestIterativeDelayedSync(t *testing.T) { func TestIterativeRandomSyncIndividual(t *testing.T) { testIterativeRandomSync(t, 1) } func TestIterativeRandomSyncBatched(t *testing.T) { testIterativeRandomSync(t, 100) } -func testIterativeRandomSync(t *testing.T, batch int) { +func testIterativeRandomSync(t *testing.T, count int) { // Create a random trie to copy srcDb, srcTrie, srcData := makeTestTrie() @@ -186,7 +190,7 @@ func testIterativeRandomSync(t *testing.T, batch int) { sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb)) queue := make(map[common.Hash]struct{}) - for _, hash := range sched.Missing(batch) { + for _, hash := range sched.Missing(count) { queue[hash] = struct{}{} } for len(queue) > 0 { @@ -203,11 +207,13 @@ func testIterativeRandomSync(t *testing.T, batch int) { if _, index, err := sched.Process(results); err != nil { t.Fatalf("failed to process result #%d: %v", index, err) } - if index, err := sched.Commit(diskdb); err != nil { - t.Fatalf("failed to commit data #%d: %v", index, err) + batch := diskdb.NewBatch() + if err := sched.Commit(batch); err != nil { + t.Fatalf("failed to commit data: %v", err) } + batch.Write() queue = make(map[common.Hash]struct{}) - for _, hash := range sched.Missing(batch) { + for _, hash := range sched.Missing(count) { queue[hash] = struct{}{} } } @@ -248,9 +254,11 @@ func TestIterativeRandomDelayedSync(t *testing.T) { if _, index, err := sched.Process(results); err != nil { t.Fatalf("failed to process result #%d: %v", index, err) } - if index, err := sched.Commit(diskdb); err != nil { - t.Fatalf("failed to commit data #%d: %v", index, err) + batch := diskdb.NewBatch() + if err := sched.Commit(batch); err != nil { + t.Fatalf("failed to commit data: %v", err) } + batch.Write() for _, result := range results { delete(queue, result.Hash) } @@ -293,9 +301,11 @@ func TestDuplicateAvoidanceSync(t *testing.T) { if _, index, err := sched.Process(results); err != nil { t.Fatalf("failed to process result #%d: %v", index, err) } - if index, err := sched.Commit(diskdb); err != nil { - t.Fatalf("failed to commit data #%d: %v", index, err) + batch := diskdb.NewBatch() + if err := sched.Commit(batch); err != nil { + t.Fatalf("failed to commit data: %v", err) } + batch.Write() queue = append(queue[:0], sched.Missing(0)...) } // Cross check that the two tries are in sync @@ -329,9 +339,11 @@ func TestIncompleteSync(t *testing.T) { if _, index, err := sched.Process(results); err != nil { t.Fatalf("failed to process result #%d: %v", index, err) } - if index, err := sched.Commit(diskdb); err != nil { - t.Fatalf("failed to commit data #%d: %v", index, err) + batch := diskdb.NewBatch() + if err := sched.Commit(batch); err != nil { + t.Fatalf("failed to commit data: %v", err) } + batch.Write() for _, result := range results { added = append(added, result.Hash) }
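With Commit now writing into a caller-supplied ethdb.Batch, the sync loop in these tests reduces to process, commit into a batch, flush. A condensed sketch of one iteration; the helper name and the ethdb.Database parameter are assumptions, not part of the patch:

```go
package example

import (
	"github.com/ethersocial/go-ethersocial/common"
	"github.com/ethersocial/go-ethersocial/ethdb"
	"github.com/ethersocial/go-ethersocial/trie"
)

// commitStep processes one round of fetched trie nodes, commits the membatch
// into a fresh database batch, flushes it, and reports what is still missing.
func commitStep(sched *trie.Sync, db ethdb.Database, results []trie.SyncResult) ([]common.Hash, error) {
	if _, _, err := sched.Process(results); err != nil {
		return nil, err
	}
	batch := db.NewBatch()
	if err := sched.Commit(batch); err != nil {
		return nil, err
	}
	if err := batch.Write(); err != nil {
		return nil, err
	}
	return sched.Missing(100), nil // schedule the next chunk of requests
}
```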