From cdf9869d70652e3603ab5d02448f98c5ff3ff44b Mon Sep 17 00:00:00 2001
From: Piotr Tabor
Date: Sat, 2 Apr 2022 22:21:53 +0200
Subject: [PATCH 01/18] Encapsulation of applier logic: Move Txn-related code
 out of applier.go.

The PR removes calls to the applierV3base logic from server.go that are NOT
part of 'application'. The original idea was that the read-only Txn and Range
calls share logic with Apply, so they could call the appliers directly
(bypassing all the 'corrupt', 'quota' and 'auth' wrappers).

This PR moves all that logic to a separate file (which can later become a
package of its own).
---
 server/etcdserver/apply.go     | 576 +------------------------------
 server/etcdserver/server.go    |   3 -
 server/etcdserver/txn.go       | 597 +++++++++++++++++++++++++++++++++
 server/etcdserver/v3_server.go |   9 +-
 4 files changed, 608 insertions(+), 577 deletions(-)
 create mode 100644 server/etcdserver/txn.go

diff --git a/server/etcdserver/apply.go b/server/etcdserver/apply.go
index 7eb53ebcc61..afef0926389 100644
--- a/server/etcdserver/apply.go
+++ b/server/etcdserver/apply.go
@@ -15,17 +15,14 @@ package etcdserver

 import (
-	"bytes"
 	"context"
 	"fmt"
-	"sort"
 	"strconv"
 	"time"

 	"github.com/coreos/go-semver/semver"
 	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
 	"go.etcd.io/etcd/api/v3/membershippb"
-	"go.etcd.io/etcd/api/v3/mvccpb"
 	"go.etcd.io/etcd/client/pkg/v3/types"
 	"go.etcd.io/etcd/pkg/v3/traceutil"
 	"go.etcd.io/etcd/server/v3/auth"
@@ -99,24 +96,12 @@ type applierV3 interface {
 	RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
 }

-type checkReqFunc func(mvcc.ReadView, *pb.RequestOp) error
-
 type applierV3backend struct {
 	s *EtcdServer
-
-	checkPut   checkReqFunc
-	checkRange checkReqFunc
 }

 func (s *EtcdServer) newApplierV3Backend() applierV3 {
-	base := &applierV3backend{s: s}
-	base.checkPut = func(rv mvcc.ReadView, req *pb.RequestOp) error {
-		return base.checkRequestPut(rv, req)
-	}
-	base.checkRange = func(rv mvcc.ReadView, req *pb.RequestOp) error {
-		return base.checkRequestRange(rv, req)
-	}
-	return base
+	return &applierV3backend{s: s}
 }

 func (s *EtcdServer) newApplierV3Internal() applierV3Internal {
@@ -249,434 +234,19 @@ func (a *applierV3backend) Apply(r *pb.InternalRaftRequest, shouldApplyV3 member
 }

 func (a *applierV3backend) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) {
-	resp = &pb.PutResponse{}
-	resp.Header = &pb.ResponseHeader{}
-	trace = traceutil.Get(ctx)
-	// create put tracing if the trace in context is empty
-	if trace.IsEmpty() {
-		trace = traceutil.New("put",
-			a.s.Logger(),
-			traceutil.Field{Key: "key", Value: string(p.Key)},
-			traceutil.Field{Key: "req_size", Value: p.Size()},
-		)
-	}
-	val, leaseID := p.Value, lease.LeaseID(p.Lease)
-	if txn == nil {
-		if leaseID != lease.NoLease {
-			if l := a.s.lessor.Lookup(leaseID); l == nil {
-				return nil, nil, lease.ErrLeaseNotFound
-			}
-		}
-		txn = a.s.KV().Write(trace)
-		defer txn.End()
-	}
-
-	var rr *mvcc.RangeResult
-	if p.IgnoreValue || p.IgnoreLease || p.PrevKv {
-		trace.StepWithFunction(func() {
-			rr, err = txn.Range(context.TODO(), p.Key, nil, mvcc.RangeOptions{})
-		}, "get previous kv pair")
-
-		if err != nil {
-			return nil, nil, err
-		}
-	}
-	if p.IgnoreValue || p.IgnoreLease {
-		if rr == nil || len(rr.KVs) == 0 {
-			// ignore_{lease,value} flag expects previous key-value pair
-			return nil, nil, ErrKeyNotFound
-		}
-	}
-	if p.IgnoreValue {
-		val = rr.KVs[0].Value
-	}
-	if p.IgnoreLease {
-		leaseID = lease.LeaseID(rr.KVs[0].Lease)
-	}
-	if p.PrevKv {
-		if 
rr != nil && len(rr.KVs) != 0 { - resp.PrevKv = &rr.KVs[0] - } - } - - resp.Header.Revision = txn.Put(p.Key, val, leaseID) - trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision}) - return resp, trace, nil + return Put(ctx, a.s.Logger(), a.s.lessor, a.s.KV(), txn, p) } func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - resp := &pb.DeleteRangeResponse{} - resp.Header = &pb.ResponseHeader{} - end := mkGteRange(dr.RangeEnd) - - if txn == nil { - txn = a.s.kv.Write(traceutil.TODO()) - defer txn.End() - } - - if dr.PrevKv { - rr, err := txn.Range(context.TODO(), dr.Key, end, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - if rr != nil { - resp.PrevKvs = make([]*mvccpb.KeyValue, len(rr.KVs)) - for i := range rr.KVs { - resp.PrevKvs[i] = &rr.KVs[i] - } - } - } - - resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, end) - return resp, nil + return DeleteRange(a.s.KV(), txn, dr) } func (a *applierV3backend) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { - trace := traceutil.Get(ctx) - - resp := &pb.RangeResponse{} - resp.Header = &pb.ResponseHeader{} - - lg := a.s.Logger() - - if txn == nil { - txn = a.s.kv.Read(mvcc.ConcurrentReadTxMode, trace) - defer txn.End() - } - - limit := r.Limit - if r.SortOrder != pb.RangeRequest_NONE || - r.MinModRevision != 0 || r.MaxModRevision != 0 || - r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 { - // fetch everything; sort and truncate afterwards - limit = 0 - } - if limit > 0 { - // fetch one extra for 'more' flag - limit = limit + 1 - } - - ro := mvcc.RangeOptions{ - Limit: limit, - Rev: r.Revision, - Count: r.CountOnly, - } - - rr, err := txn.Range(ctx, r.Key, mkGteRange(r.RangeEnd), ro) - if err != nil { - return nil, err - } - - if r.MaxModRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision } - pruneKVs(rr, f) - } - if r.MinModRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision } - pruneKVs(rr, f) - } - if r.MaxCreateRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision } - pruneKVs(rr, f) - } - if r.MinCreateRevision != 0 { - f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision } - pruneKVs(rr, f) - } - - sortOrder := r.SortOrder - if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE { - // Since current mvcc.Range implementation returns results - // sorted by keys in lexiographically ascending order, - // sort ASCEND by default only when target is not 'KEY' - sortOrder = pb.RangeRequest_ASCEND - } else if r.SortTarget == pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_ASCEND { - // Since current mvcc.Range implementation returns results - // sorted by keys in lexiographically ascending order, - // don't re-sort when target is 'KEY' and order is ASCEND - sortOrder = pb.RangeRequest_NONE - } - if sortOrder != pb.RangeRequest_NONE { - var sorter sort.Interface - switch { - case r.SortTarget == pb.RangeRequest_KEY: - sorter = &kvSortByKey{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_VERSION: - sorter = &kvSortByVersion{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_CREATE: - sorter = &kvSortByCreate{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_MOD: - sorter = &kvSortByMod{&kvSort{rr.KVs}} - case r.SortTarget == pb.RangeRequest_VALUE: - sorter = 
&kvSortByValue{&kvSort{rr.KVs}} - default: - lg.Panic("unexpected sort target", zap.Int32("sort-target", int32(r.SortTarget))) - } - switch { - case sortOrder == pb.RangeRequest_ASCEND: - sort.Sort(sorter) - case sortOrder == pb.RangeRequest_DESCEND: - sort.Sort(sort.Reverse(sorter)) - } - } - - if r.Limit > 0 && len(rr.KVs) > int(r.Limit) { - rr.KVs = rr.KVs[:r.Limit] - resp.More = true - } - trace.Step("filter and sort the key-value pairs") - resp.Header.Revision = rr.Rev - resp.Count = int64(rr.Count) - resp.Kvs = make([]*mvccpb.KeyValue, len(rr.KVs)) - for i := range rr.KVs { - if r.KeysOnly { - rr.KVs[i].Value = nil - } - resp.Kvs[i] = &rr.KVs[i] - } - trace.Step("assemble the response") - return resp, nil + return Range(ctx, a.s.Logger(), a.s.KV(), txn, r) } func (a *applierV3backend) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { - trace := traceutil.Get(ctx) - if trace.IsEmpty() { - trace = traceutil.New("transaction", a.s.Logger()) - ctx = context.WithValue(ctx, traceutil.TraceKey, trace) - } - isWrite := !isTxnReadonly(rt) - - // When the transaction contains write operations, we use ReadTx instead of - // ConcurrentReadTx to avoid extra overhead of copying buffer. - var txn mvcc.TxnWrite - if isWrite && a.s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer { - txn = mvcc.NewReadOnlyTxnWrite(a.s.KV().Read(mvcc.SharedBufReadTxMode, trace)) - } else { - txn = mvcc.NewReadOnlyTxnWrite(a.s.KV().Read(mvcc.ConcurrentReadTxMode, trace)) - } - - var txnPath []bool - trace.StepWithFunction( - func() { - txnPath = compareToPath(txn, rt) - }, - "compare", - ) - - if isWrite { - trace.AddField(traceutil.Field{Key: "read_only", Value: false}) - if _, err := checkRequests(txn, rt, txnPath, a.checkPut); err != nil { - txn.End() - return nil, nil, err - } - } - if _, err := checkRequests(txn, rt, txnPath, a.checkRange); err != nil { - txn.End() - return nil, nil, err - } - trace.Step("check requests") - txnResp, _ := newTxnResp(rt, txnPath) - - // When executing mutable txn ops, etcd must hold the txn lock so - // readers do not see any intermediate results. Since writes are - // serialized on the raft loop, the revision in the read view will - // be the revision of the write txn. - if isWrite { - txn.End() - txn = a.s.KV().Write(trace) - } - a.applyTxn(ctx, txn, rt, txnPath, txnResp) - rev := txn.Rev() - if len(txn.Changes()) != 0 { - rev++ - } - txn.End() - - txnResp.Header.Revision = rev - trace.AddField( - traceutil.Field{Key: "number_of_response", Value: len(txnResp.Responses)}, - traceutil.Field{Key: "response_revision", Value: txnResp.Header.Revision}, - ) - return txnResp, trace, nil -} - -// newTxnResp allocates a txn response for a txn request given a path. 
-func newTxnResp(rt *pb.TxnRequest, txnPath []bool) (txnResp *pb.TxnResponse, txnCount int) { - reqs := rt.Success - if !txnPath[0] { - reqs = rt.Failure - } - resps := make([]*pb.ResponseOp, len(reqs)) - txnResp = &pb.TxnResponse{ - Responses: resps, - Succeeded: txnPath[0], - Header: &pb.ResponseHeader{}, - } - for i, req := range reqs { - switch tv := req.Request.(type) { - case *pb.RequestOp_RequestRange: - resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{}} - case *pb.RequestOp_RequestPut: - resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{}} - case *pb.RequestOp_RequestDeleteRange: - resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{}} - case *pb.RequestOp_RequestTxn: - resp, txns := newTxnResp(tv.RequestTxn, txnPath[1:]) - resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseTxn{ResponseTxn: resp}} - txnPath = txnPath[1+txns:] - txnCount += txns + 1 - default: - } - } - return txnResp, txnCount -} - -func compareToPath(rv mvcc.ReadView, rt *pb.TxnRequest) []bool { - txnPath := make([]bool, 1) - ops := rt.Success - if txnPath[0] = applyCompares(rv, rt.Compare); !txnPath[0] { - ops = rt.Failure - } - for _, op := range ops { - tv, ok := op.Request.(*pb.RequestOp_RequestTxn) - if !ok || tv.RequestTxn == nil { - continue - } - txnPath = append(txnPath, compareToPath(rv, tv.RequestTxn)...) - } - return txnPath -} - -func applyCompares(rv mvcc.ReadView, cmps []*pb.Compare) bool { - for _, c := range cmps { - if !applyCompare(rv, c) { - return false - } - } - return true -} - -// applyCompare applies the compare request. -// If the comparison succeeds, it returns true. Otherwise, returns false. -func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool { - // TODO: possible optimizations - // * chunk reads for large ranges to conserve memory - // * rewrite rules for common patterns: - // ex. 
"[a, b) createrev > 0" => "limit 1 /\ kvs > 0" - // * caching - rr, err := rv.Range(context.TODO(), c.Key, mkGteRange(c.RangeEnd), mvcc.RangeOptions{}) - if err != nil { - return false - } - if len(rr.KVs) == 0 { - if c.Target == pb.Compare_VALUE { - // Always fail if comparing a value on a key/keys that doesn't exist; - // nil == empty string in grpc; no way to represent missing value - return false - } - return compareKV(c, mvccpb.KeyValue{}) - } - for _, kv := range rr.KVs { - if !compareKV(c, kv) { - return false - } - } - return true -} - -func compareKV(c *pb.Compare, ckv mvccpb.KeyValue) bool { - var result int - rev := int64(0) - switch c.Target { - case pb.Compare_VALUE: - v := []byte{} - if tv, _ := c.TargetUnion.(*pb.Compare_Value); tv != nil { - v = tv.Value - } - result = bytes.Compare(ckv.Value, v) - case pb.Compare_CREATE: - if tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision); tv != nil { - rev = tv.CreateRevision - } - result = compareInt64(ckv.CreateRevision, rev) - case pb.Compare_MOD: - if tv, _ := c.TargetUnion.(*pb.Compare_ModRevision); tv != nil { - rev = tv.ModRevision - } - result = compareInt64(ckv.ModRevision, rev) - case pb.Compare_VERSION: - if tv, _ := c.TargetUnion.(*pb.Compare_Version); tv != nil { - rev = tv.Version - } - result = compareInt64(ckv.Version, rev) - case pb.Compare_LEASE: - if tv, _ := c.TargetUnion.(*pb.Compare_Lease); tv != nil { - rev = tv.Lease - } - result = compareInt64(ckv.Lease, rev) - } - switch c.Result { - case pb.Compare_EQUAL: - return result == 0 - case pb.Compare_NOT_EQUAL: - return result != 0 - case pb.Compare_GREATER: - return result > 0 - case pb.Compare_LESS: - return result < 0 - } - return true -} - -func (a *applierV3backend) applyTxn(ctx context.Context, txn mvcc.TxnWrite, rt *pb.TxnRequest, txnPath []bool, tresp *pb.TxnResponse) (txns int) { - trace := traceutil.Get(ctx) - reqs := rt.Success - if !txnPath[0] { - reqs = rt.Failure - } - - lg := a.s.Logger() - for i, req := range reqs { - respi := tresp.Responses[i].Response - switch tv := req.Request.(type) { - case *pb.RequestOp_RequestRange: - trace.StartSubTrace( - traceutil.Field{Key: "req_type", Value: "range"}, - traceutil.Field{Key: "range_begin", Value: string(tv.RequestRange.Key)}, - traceutil.Field{Key: "range_end", Value: string(tv.RequestRange.RangeEnd)}) - resp, err := a.Range(ctx, txn, tv.RequestRange) - if err != nil { - lg.Panic("unexpected error during txn", zap.Error(err)) - } - respi.(*pb.ResponseOp_ResponseRange).ResponseRange = resp - trace.StopSubTrace() - case *pb.RequestOp_RequestPut: - trace.StartSubTrace( - traceutil.Field{Key: "req_type", Value: "put"}, - traceutil.Field{Key: "key", Value: string(tv.RequestPut.Key)}, - traceutil.Field{Key: "req_size", Value: tv.RequestPut.Size()}) - resp, _, err := a.Put(ctx, txn, tv.RequestPut) - if err != nil { - lg.Panic("unexpected error during txn", zap.Error(err)) - } - respi.(*pb.ResponseOp_ResponsePut).ResponsePut = resp - trace.StopSubTrace() - case *pb.RequestOp_RequestDeleteRange: - resp, err := a.DeleteRange(txn, tv.RequestDeleteRange) - if err != nil { - lg.Panic("unexpected error during txn", zap.Error(err)) - } - respi.(*pb.ResponseOp_ResponseDeleteRange).ResponseDeleteRange = resp - case *pb.RequestOp_RequestTxn: - resp := respi.(*pb.ResponseOp_ResponseTxn).ResponseTxn - applyTxns := a.applyTxn(ctx, txn, tv.RequestTxn, txnPath[1:], resp) - txns += applyTxns + 1 - txnPath = txnPath[applyTxns+1:] - default: - // empty union - } - } - return txns + return Txn(ctx, a.s.Logger(), rt, 
a.s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer, a.s.KV(), a.s.lessor) } func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) { @@ -1007,131 +577,6 @@ func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantRes return resp, err } -type kvSort struct{ kvs []mvccpb.KeyValue } - -func (s *kvSort) Swap(i, j int) { - t := s.kvs[i] - s.kvs[i] = s.kvs[j] - s.kvs[j] = t -} -func (s *kvSort) Len() int { return len(s.kvs) } - -type kvSortByKey struct{ *kvSort } - -func (s *kvSortByKey) Less(i, j int) bool { - return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0 -} - -type kvSortByVersion struct{ *kvSort } - -func (s *kvSortByVersion) Less(i, j int) bool { - return (s.kvs[i].Version - s.kvs[j].Version) < 0 -} - -type kvSortByCreate struct{ *kvSort } - -func (s *kvSortByCreate) Less(i, j int) bool { - return (s.kvs[i].CreateRevision - s.kvs[j].CreateRevision) < 0 -} - -type kvSortByMod struct{ *kvSort } - -func (s *kvSortByMod) Less(i, j int) bool { - return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0 -} - -type kvSortByValue struct{ *kvSort } - -func (s *kvSortByValue) Less(i, j int) bool { - return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0 -} - -func checkRequests(rv mvcc.ReadView, rt *pb.TxnRequest, txnPath []bool, f checkReqFunc) (int, error) { - txnCount := 0 - reqs := rt.Success - if !txnPath[0] { - reqs = rt.Failure - } - for _, req := range reqs { - if tv, ok := req.Request.(*pb.RequestOp_RequestTxn); ok && tv.RequestTxn != nil { - txns, err := checkRequests(rv, tv.RequestTxn, txnPath[1:], f) - if err != nil { - return 0, err - } - txnCount += txns + 1 - txnPath = txnPath[txns+1:] - continue - } - if err := f(rv, req); err != nil { - return 0, err - } - } - return txnCount, nil -} - -func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqOp *pb.RequestOp) error { - tv, ok := reqOp.Request.(*pb.RequestOp_RequestPut) - if !ok || tv.RequestPut == nil { - return nil - } - req := tv.RequestPut - if req.IgnoreValue || req.IgnoreLease { - // expects previous key-value, error if not exist - rr, err := rv.Range(context.TODO(), req.Key, nil, mvcc.RangeOptions{}) - if err != nil { - return err - } - if rr == nil || len(rr.KVs) == 0 { - return ErrKeyNotFound - } - } - if lease.LeaseID(req.Lease) != lease.NoLease { - if l := a.s.lessor.Lookup(lease.LeaseID(req.Lease)); l == nil { - return lease.ErrLeaseNotFound - } - } - return nil -} - -func (a *applierV3backend) checkRequestRange(rv mvcc.ReadView, reqOp *pb.RequestOp) error { - tv, ok := reqOp.Request.(*pb.RequestOp_RequestRange) - if !ok || tv.RequestRange == nil { - return nil - } - req := tv.RequestRange - switch { - case req.Revision == 0: - return nil - case req.Revision > rv.Rev(): - return mvcc.ErrFutureRev - case req.Revision < rv.FirstRev(): - return mvcc.ErrCompacted - } - return nil -} - -func compareInt64(a, b int64) int { - switch { - case a < b: - return -1 - case a > b: - return 1 - default: - return 0 - } -} - -// mkGteRange determines if the range end is a >= range. This works around grpc -// sending empty byte strings as nil; >= is encoded in the range end as '\0'. -// If it is a GTE range, then []byte{} is returned to indicate the empty byte -// string (vs nil being no byte string). 
-func mkGteRange(rangeEnd []byte) []byte { - if len(rangeEnd) == 1 && rangeEnd[0] == 0 { - return []byte{} - } - return rangeEnd -} - func noSideEffect(r *pb.InternalRaftRequest) bool { return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil || r.AuthStatus != nil } @@ -1154,17 +599,6 @@ func removeNeedlessRangeReqs(txn *pb.TxnRequest) { txn.Failure = f(txn.Failure) } -func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) { - j := 0 - for i := range rr.KVs { - rr.KVs[j] = rr.KVs[i] - if !isPrunable(&rr.KVs[i]) { - j++ - } - } - rr.KVs = rr.KVs[:j] -} - func newHeader(s *EtcdServer) *pb.ResponseHeader { return &pb.ResponseHeader{ ClusterId: uint64(s.Cluster().ID()), diff --git a/server/etcdserver/server.go b/server/etcdserver/server.go index 545a83cd98d..8f6c75a1c68 100644 --- a/server/etcdserver/server.go +++ b/server/etcdserver/server.go @@ -252,8 +252,6 @@ type EtcdServer struct { // applyV3 is the applier with auth and quotas applyV3 applierV3 - // applyV3Base is the core applier without auth or quotas - applyV3Base applierV3 // applyV3Internal is the applier for internal request applyV3Internal applierV3Internal applyWait wait.WaitTime @@ -392,7 +390,6 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) { srv.compactor.Run() } - srv.applyV3Base = srv.newApplierV3Backend() srv.applyV3Internal = srv.newApplierV3Internal() if err = srv.restoreAlarms(); err != nil { return nil, err diff --git a/server/etcdserver/txn.go b/server/etcdserver/txn.go new file mode 100644 index 00000000000..747017093d9 --- /dev/null +++ b/server/etcdserver/txn.go @@ -0,0 +1,597 @@ +// Copyright 2022 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package etcdserver + +import ( + "bytes" + "context" + "sort" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/api/v3/mvccpb" + "go.etcd.io/etcd/pkg/v3/traceutil" + "go.etcd.io/etcd/server/v3/lease" + "go.etcd.io/etcd/server/v3/storage/mvcc" + "go.uber.org/zap" +) + +func Put(ctx context.Context, lg *zap.Logger, lessor lease.Lessor, kv mvcc.KV, txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) { + resp = &pb.PutResponse{} + resp.Header = &pb.ResponseHeader{} + trace = traceutil.Get(ctx) + // create put tracing if the trace in context is empty + if trace.IsEmpty() { + trace = traceutil.New("put", + lg, + traceutil.Field{Key: "key", Value: string(p.Key)}, + traceutil.Field{Key: "req_size", Value: p.Size()}, + ) + } + val, leaseID := p.Value, lease.LeaseID(p.Lease) + if txn == nil { + if leaseID != lease.NoLease { + if l := lessor.Lookup(leaseID); l == nil { + return nil, nil, lease.ErrLeaseNotFound + } + } + txn = kv.Write(trace) + defer txn.End() + } + + var rr *mvcc.RangeResult + if p.IgnoreValue || p.IgnoreLease || p.PrevKv { + trace.StepWithFunction(func() { + rr, err = txn.Range(context.TODO(), p.Key, nil, mvcc.RangeOptions{}) + }, "get previous kv pair") + + if err != nil { + return nil, nil, err + } + } + if p.IgnoreValue || p.IgnoreLease { + if rr == nil || len(rr.KVs) == 0 { + // ignore_{lease,value} flag expects previous key-value pair + return nil, nil, ErrKeyNotFound + } + } + if p.IgnoreValue { + val = rr.KVs[0].Value + } + if p.IgnoreLease { + leaseID = lease.LeaseID(rr.KVs[0].Lease) + } + if p.PrevKv { + if rr != nil && len(rr.KVs) != 0 { + resp.PrevKv = &rr.KVs[0] + } + } + + resp.Header.Revision = txn.Put(p.Key, val, leaseID) + trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision}) + return resp, trace, nil +} + +func DeleteRange(kv mvcc.KV, txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { + resp := &pb.DeleteRangeResponse{} + resp.Header = &pb.ResponseHeader{} + end := mkGteRange(dr.RangeEnd) + + if txn == nil { + txn = kv.Write(traceutil.TODO()) + defer txn.End() + } + + if dr.PrevKv { + rr, err := txn.Range(context.TODO(), dr.Key, end, mvcc.RangeOptions{}) + if err != nil { + return nil, err + } + if rr != nil { + resp.PrevKvs = make([]*mvccpb.KeyValue, len(rr.KVs)) + for i := range rr.KVs { + resp.PrevKvs[i] = &rr.KVs[i] + } + } + } + + resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, end) + return resp, nil +} + +func Range(ctx context.Context, lg *zap.Logger, kv mvcc.KV, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { + trace := traceutil.Get(ctx) + + resp := &pb.RangeResponse{} + resp.Header = &pb.ResponseHeader{} + + if txn == nil { + txn = kv.Read(mvcc.ConcurrentReadTxMode, trace) + defer txn.End() + } + + limit := r.Limit + if r.SortOrder != pb.RangeRequest_NONE || + r.MinModRevision != 0 || r.MaxModRevision != 0 || + r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 { + // fetch everything; sort and truncate afterwards + limit = 0 + } + if limit > 0 { + // fetch one extra for 'more' flag + limit = limit + 1 + } + + ro := mvcc.RangeOptions{ + Limit: limit, + Rev: r.Revision, + Count: r.CountOnly, + } + + rr, err := txn.Range(ctx, r.Key, mkGteRange(r.RangeEnd), ro) + if err != nil { + return nil, err + } + + if r.MaxModRevision != 0 { + f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision } + pruneKVs(rr, f) + } + if r.MinModRevision != 0 { + f := func(kv *mvccpb.KeyValue) bool { 
return kv.ModRevision < r.MinModRevision } + pruneKVs(rr, f) + } + if r.MaxCreateRevision != 0 { + f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision } + pruneKVs(rr, f) + } + if r.MinCreateRevision != 0 { + f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision } + pruneKVs(rr, f) + } + + sortOrder := r.SortOrder + if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE { + // Since current mvcc.Range implementation returns results + // sorted by keys in lexiographically ascending order, + // sort ASCEND by default only when target is not 'KEY' + sortOrder = pb.RangeRequest_ASCEND + } else if r.SortTarget == pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_ASCEND { + // Since current mvcc.Range implementation returns results + // sorted by keys in lexiographically ascending order, + // don't re-sort when target is 'KEY' and order is ASCEND + sortOrder = pb.RangeRequest_NONE + } + if sortOrder != pb.RangeRequest_NONE { + var sorter sort.Interface + switch { + case r.SortTarget == pb.RangeRequest_KEY: + sorter = &kvSortByKey{&kvSort{rr.KVs}} + case r.SortTarget == pb.RangeRequest_VERSION: + sorter = &kvSortByVersion{&kvSort{rr.KVs}} + case r.SortTarget == pb.RangeRequest_CREATE: + sorter = &kvSortByCreate{&kvSort{rr.KVs}} + case r.SortTarget == pb.RangeRequest_MOD: + sorter = &kvSortByMod{&kvSort{rr.KVs}} + case r.SortTarget == pb.RangeRequest_VALUE: + sorter = &kvSortByValue{&kvSort{rr.KVs}} + default: + lg.Panic("unexpected sort target", zap.Int32("sort-target", int32(r.SortTarget))) + } + switch { + case sortOrder == pb.RangeRequest_ASCEND: + sort.Sort(sorter) + case sortOrder == pb.RangeRequest_DESCEND: + sort.Sort(sort.Reverse(sorter)) + } + } + + if r.Limit > 0 && len(rr.KVs) > int(r.Limit) { + rr.KVs = rr.KVs[:r.Limit] + resp.More = true + } + trace.Step("filter and sort the key-value pairs") + resp.Header.Revision = rr.Rev + resp.Count = int64(rr.Count) + resp.Kvs = make([]*mvccpb.KeyValue, len(rr.KVs)) + for i := range rr.KVs { + if r.KeysOnly { + rr.KVs[i].Value = nil + } + resp.Kvs[i] = &rr.KVs[i] + } + trace.Step("assemble the response") + return resp, nil +} + +func Txn(ctx context.Context, lg *zap.Logger, rt *pb.TxnRequest, txnModeWriteWithSharedBuffer bool, kv mvcc.KV, lessor lease.Lessor) (*pb.TxnResponse, *traceutil.Trace, error) { + trace := traceutil.Get(ctx) + if trace.IsEmpty() { + trace = traceutil.New("transaction", lg) + ctx = context.WithValue(ctx, traceutil.TraceKey, trace) + } + isWrite := !isTxnReadonly(rt) + + // When the transaction contains write operations, we use ReadTx instead of + // ConcurrentReadTx to avoid extra overhead of copying buffer. 
+ var txn mvcc.TxnWrite + if isWrite && txnModeWriteWithSharedBuffer /*a.s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer*/ { + txn = mvcc.NewReadOnlyTxnWrite(kv.Read(mvcc.SharedBufReadTxMode, trace)) + } else { + txn = mvcc.NewReadOnlyTxnWrite(kv.Read(mvcc.ConcurrentReadTxMode, trace)) + } + + var txnPath []bool + trace.StepWithFunction( + func() { + txnPath = compareToPath(txn, rt) + }, + "compare", + ) + + if isWrite { + trace.AddField(traceutil.Field{Key: "read_only", Value: false}) + if _, err := checkRequests(txn, rt, txnPath, + func(rv mvcc.ReadView, ro *pb.RequestOp) error { return checkRequestPut(rv, lessor, ro) }); err != nil { + txn.End() + return nil, nil, err + } + } + if _, err := checkRequests(txn, rt, txnPath, checkRequestRange); err != nil { + txn.End() + return nil, nil, err + } + trace.Step("check requests") + txnResp, _ := newTxnResp(rt, txnPath) + + // When executing mutable txn ops, etcd must hold the txn lock so + // readers do not see any intermediate results. Since writes are + // serialized on the raft loop, the revision in the read view will + // be the revision of the write txn. + if isWrite { + txn.End() + txn = kv.Write(trace) + } + applyTxn(ctx, lg, kv, lessor, txn, rt, txnPath, txnResp) + rev := txn.Rev() + if len(txn.Changes()) != 0 { + rev++ + } + txn.End() + + txnResp.Header.Revision = rev + trace.AddField( + traceutil.Field{Key: "number_of_response", Value: len(txnResp.Responses)}, + traceutil.Field{Key: "response_revision", Value: txnResp.Header.Revision}, + ) + return txnResp, trace, nil +} + +// newTxnResp allocates a txn response for a txn request given a path. +func newTxnResp(rt *pb.TxnRequest, txnPath []bool) (txnResp *pb.TxnResponse, txnCount int) { + reqs := rt.Success + if !txnPath[0] { + reqs = rt.Failure + } + resps := make([]*pb.ResponseOp, len(reqs)) + txnResp = &pb.TxnResponse{ + Responses: resps, + Succeeded: txnPath[0], + Header: &pb.ResponseHeader{}, + } + for i, req := range reqs { + switch tv := req.Request.(type) { + case *pb.RequestOp_RequestRange: + resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{}} + case *pb.RequestOp_RequestPut: + resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{}} + case *pb.RequestOp_RequestDeleteRange: + resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{}} + case *pb.RequestOp_RequestTxn: + resp, txns := newTxnResp(tv.RequestTxn, txnPath[1:]) + resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseTxn{ResponseTxn: resp}} + txnPath = txnPath[1+txns:] + txnCount += txns + 1 + default: + } + } + return txnResp, txnCount +} + +func applyTxn(ctx context.Context, lg *zap.Logger, kv mvcc.KV, lessor lease.Lessor, txn mvcc.TxnWrite, rt *pb.TxnRequest, txnPath []bool, tresp *pb.TxnResponse) (txns int) { + trace := traceutil.Get(ctx) + reqs := rt.Success + if !txnPath[0] { + reqs = rt.Failure + } + + for i, req := range reqs { + respi := tresp.Responses[i].Response + switch tv := req.Request.(type) { + case *pb.RequestOp_RequestRange: + trace.StartSubTrace( + traceutil.Field{Key: "req_type", Value: "range"}, + traceutil.Field{Key: "range_begin", Value: string(tv.RequestRange.Key)}, + traceutil.Field{Key: "range_end", Value: string(tv.RequestRange.RangeEnd)}) + resp, err := Range(ctx, lg, kv, txn, tv.RequestRange) + if err != nil { + lg.Panic("unexpected error during txn", zap.Error(err)) + } + respi.(*pb.ResponseOp_ResponseRange).ResponseRange = resp + trace.StopSubTrace() + case *pb.RequestOp_RequestPut: + trace.StartSubTrace( + traceutil.Field{Key: "req_type", 
Value: "put"}, + traceutil.Field{Key: "key", Value: string(tv.RequestPut.Key)}, + traceutil.Field{Key: "req_size", Value: tv.RequestPut.Size()}) + resp, _, err := Put(ctx, lg, lessor, kv, txn, tv.RequestPut) + if err != nil { + lg.Panic("unexpected error during txn", zap.Error(err)) + } + respi.(*pb.ResponseOp_ResponsePut).ResponsePut = resp + trace.StopSubTrace() + case *pb.RequestOp_RequestDeleteRange: + resp, err := DeleteRange(kv, txn, tv.RequestDeleteRange) + if err != nil { + lg.Panic("unexpected error during txn", zap.Error(err)) + } + respi.(*pb.ResponseOp_ResponseDeleteRange).ResponseDeleteRange = resp + case *pb.RequestOp_RequestTxn: + resp := respi.(*pb.ResponseOp_ResponseTxn).ResponseTxn + applyTxns := applyTxn(ctx, lg, kv, lessor, txn, tv.RequestTxn, txnPath[1:], resp) + txns += applyTxns + 1 + txnPath = txnPath[applyTxns+1:] + default: + // empty union + } + } + return txns +} + +//--------------------------------------------------------- + +type checkReqFunc func(mvcc.ReadView, *pb.RequestOp) error + +func checkRequestPut(rv mvcc.ReadView, lessor lease.Lessor, reqOp *pb.RequestOp) error { + tv, ok := reqOp.Request.(*pb.RequestOp_RequestPut) + if !ok || tv.RequestPut == nil { + return nil + } + req := tv.RequestPut + if req.IgnoreValue || req.IgnoreLease { + // expects previous key-value, error if not exist + rr, err := rv.Range(context.TODO(), req.Key, nil, mvcc.RangeOptions{}) + if err != nil { + return err + } + if rr == nil || len(rr.KVs) == 0 { + return ErrKeyNotFound + } + } + if lease.LeaseID(req.Lease) != lease.NoLease { + if l := lessor.Lookup(lease.LeaseID(req.Lease)); l == nil { + return lease.ErrLeaseNotFound + } + } + return nil +} + +func checkRequestRange(rv mvcc.ReadView, reqOp *pb.RequestOp) error { + tv, ok := reqOp.Request.(*pb.RequestOp_RequestRange) + if !ok || tv.RequestRange == nil { + return nil + } + req := tv.RequestRange + switch { + case req.Revision == 0: + return nil + case req.Revision > rv.Rev(): + return mvcc.ErrFutureRev + case req.Revision < rv.FirstRev(): + return mvcc.ErrCompacted + } + return nil +} + +func checkRequests(rv mvcc.ReadView, rt *pb.TxnRequest, txnPath []bool, f checkReqFunc) (int, error) { + txnCount := 0 + reqs := rt.Success + if !txnPath[0] { + reqs = rt.Failure + } + for _, req := range reqs { + if tv, ok := req.Request.(*pb.RequestOp_RequestTxn); ok && tv.RequestTxn != nil { + txns, err := checkRequests(rv, tv.RequestTxn, txnPath[1:], f) + if err != nil { + return 0, err + } + txnCount += txns + 1 + txnPath = txnPath[txns+1:] + continue + } + if err := f(rv, req); err != nil { + return 0, err + } + } + return txnCount, nil +} + +// mkGteRange determines if the range end is a >= range. This works around grpc +// sending empty byte strings as nil; >= is encoded in the range end as '\0'. +// If it is a GTE range, then []byte{} is returned to indicate the empty byte +// string (vs nil being no byte string). 
+func mkGteRange(rangeEnd []byte) []byte { + if len(rangeEnd) == 1 && rangeEnd[0] == 0 { + return []byte{} + } + return rangeEnd +} + +func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) { + j := 0 + for i := range rr.KVs { + rr.KVs[j] = rr.KVs[i] + if !isPrunable(&rr.KVs[i]) { + j++ + } + } + rr.KVs = rr.KVs[:j] +} + +type kvSort struct{ kvs []mvccpb.KeyValue } + +func (s *kvSort) Swap(i, j int) { + t := s.kvs[i] + s.kvs[i] = s.kvs[j] + s.kvs[j] = t +} +func (s *kvSort) Len() int { return len(s.kvs) } + +type kvSortByKey struct{ *kvSort } + +func (s *kvSortByKey) Less(i, j int) bool { + return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0 +} + +type kvSortByVersion struct{ *kvSort } + +func (s *kvSortByVersion) Less(i, j int) bool { + return (s.kvs[i].Version - s.kvs[j].Version) < 0 +} + +type kvSortByCreate struct{ *kvSort } + +func (s *kvSortByCreate) Less(i, j int) bool { + return (s.kvs[i].CreateRevision - s.kvs[j].CreateRevision) < 0 +} + +type kvSortByMod struct{ *kvSort } + +func (s *kvSortByMod) Less(i, j int) bool { + return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0 +} + +type kvSortByValue struct{ *kvSort } + +func (s *kvSortByValue) Less(i, j int) bool { + return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0 +} + +func compareInt64(a, b int64) int { + switch { + case a < b: + return -1 + case a > b: + return 1 + default: + return 0 + } +} + +func compareToPath(rv mvcc.ReadView, rt *pb.TxnRequest) []bool { + txnPath := make([]bool, 1) + ops := rt.Success + if txnPath[0] = applyCompares(rv, rt.Compare); !txnPath[0] { + ops = rt.Failure + } + for _, op := range ops { + tv, ok := op.Request.(*pb.RequestOp_RequestTxn) + if !ok || tv.RequestTxn == nil { + continue + } + txnPath = append(txnPath, compareToPath(rv, tv.RequestTxn)...) + } + return txnPath +} + +func applyCompares(rv mvcc.ReadView, cmps []*pb.Compare) bool { + for _, c := range cmps { + if !applyCompare(rv, c) { + return false + } + } + return true +} + +// applyCompare applies the compare request. +// If the comparison succeeds, it returns true. Otherwise, returns false. +func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool { + // TODO: possible optimizations + // * chunk reads for large ranges to conserve memory + // * rewrite rules for common patterns: + // ex. 
"[a, b) createrev > 0" => "limit 1 /\ kvs > 0" + // * caching + rr, err := rv.Range(context.TODO(), c.Key, mkGteRange(c.RangeEnd), mvcc.RangeOptions{}) + if err != nil { + return false + } + if len(rr.KVs) == 0 { + if c.Target == pb.Compare_VALUE { + // Always fail if comparing a value on a key/keys that doesn't exist; + // nil == empty string in grpc; no way to represent missing value + return false + } + return compareKV(c, mvccpb.KeyValue{}) + } + for _, kv := range rr.KVs { + if !compareKV(c, kv) { + return false + } + } + return true +} + +func compareKV(c *pb.Compare, ckv mvccpb.KeyValue) bool { + var result int + rev := int64(0) + switch c.Target { + case pb.Compare_VALUE: + v := []byte{} + if tv, _ := c.TargetUnion.(*pb.Compare_Value); tv != nil { + v = tv.Value + } + result = bytes.Compare(ckv.Value, v) + case pb.Compare_CREATE: + if tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision); tv != nil { + rev = tv.CreateRevision + } + result = compareInt64(ckv.CreateRevision, rev) + case pb.Compare_MOD: + if tv, _ := c.TargetUnion.(*pb.Compare_ModRevision); tv != nil { + rev = tv.ModRevision + } + result = compareInt64(ckv.ModRevision, rev) + case pb.Compare_VERSION: + if tv, _ := c.TargetUnion.(*pb.Compare_Version); tv != nil { + rev = tv.Version + } + result = compareInt64(ckv.Version, rev) + case pb.Compare_LEASE: + if tv, _ := c.TargetUnion.(*pb.Compare_Lease); tv != nil { + rev = tv.Lease + } + result = compareInt64(ckv.Lease, rev) + } + switch c.Result { + case pb.Compare_EQUAL: + return result == 0 + case pb.Compare_NOT_EQUAL: + return result != 0 + case pb.Compare_GREATER: + return result > 0 + case pb.Compare_LESS: + return result < 0 + } + return true +} diff --git a/server/etcdserver/v3_server.go b/server/etcdserver/v3_server.go index 154cbee2357..3dad86a63b4 100644 --- a/server/etcdserver/v3_server.go +++ b/server/etcdserver/v3_server.go @@ -128,7 +128,7 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd) } - get := func() { resp, err = s.applyV3Base.Range(ctx, nil, r) } + get := func() { resp, err = Range(ctx, s.Logger(), s.KV(), nil, r) } if serr := s.doSerialize(ctx, chk, get); serr != nil { err = serr return nil, err @@ -178,7 +178,9 @@ func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse trace.LogIfLong(traceThreshold) }(time.Now()) - get := func() { resp, _, err = s.applyV3Base.Txn(ctx, r) } + get := func() { + resp, _, err = Txn(ctx, s.Logger(), r, s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer, s.KV(), s.lessor) + } if serr := s.doSerialize(ctx, chk, get); serr != nil { return nil, serr } @@ -390,7 +392,8 @@ func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveR return nil, ErrCanceled } -func (s *EtcdServer) LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) { +// LeaseLeases is really ListLeases !??? 
+func (s *EtcdServer) LeaseLeases(_ context.Context, _ *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
 	ls := s.lessor.Leases()
 	lss := make([]*pb.LeaseStatus, len(ls))
 	for i := range ls {

From b7ad746bfe6e2e4f7cb3beb89a8dff3c37d92b87 Mon Sep 17 00:00:00 2001
From: Piotr Tabor
Date: Sat, 2 Apr 2022 21:25:46 +0200
Subject: [PATCH 02/18] Encapsulating applier logic: UberApplier coordinates
 all appliers for the server

This PR:
  - moves wrapping of appliers (due to Alarms) out of server.go into
    uber_applier.go
  - clearly divides the application logic into a chain of:
     a) 'WrapApply' (generic logic across all the methods)
     b) dispatcher (translation of Apply into a specific method like 'Put')
     c) chain of 'wrappers' of the specific methods (like Put).
  - when we do recovery (restore from snapshot), we create a new instance of
    the appliers.

The purpose is to make sure we control all the dependencies of the apply
process, i.e. we can supply e.g. a special instance of 'backend' to the
application logic.
---
 server/etcdserver/apply.go        | 176 ++----------------------
 server/etcdserver/apply_auth.go   |   4 +-
 server/etcdserver/server.go       |  23 ++--
 server/etcdserver/uber_applier.go | 220 ++++++++++++++++++++++++++++++
 4 files changed, 240 insertions(+), 183 deletions(-)
 create mode 100644 server/etcdserver/uber_applier.go

diff --git a/server/etcdserver/apply.go b/server/etcdserver/apply.go
index afef0926389..f54099d6baa 100644
--- a/server/etcdserver/apply.go
+++ b/server/etcdserver/apply.go
@@ -16,9 +16,6 @@ package etcdserver

 import (
 	"context"
-	"fmt"
-	"strconv"
-	"time"

 	"github.com/coreos/go-semver/semver"
 	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
@@ -45,7 +42,7 @@ type applyResult struct {
 	resp proto.Message
 	err  error
 	// physc signals the physical effect of the request has completed in addition
-	// to being logically reflected by the node. Currently only used for
+	// to being logically reflected by the node. Currently, only used for
 	// Compaction requests.
physc <-chan struct{} trace *traceutil.Trace @@ -58,9 +55,12 @@ type applierV3Internal interface { DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3) } +type ApplyFunc func(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult + // applierV3 is the interface for processing V3 raft messages type applierV3 interface { - Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult + WrapApply(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3, applyFunc ApplyFunc) *applyResult + //Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) @@ -100,137 +100,8 @@ type applierV3backend struct { s *EtcdServer } -func (s *EtcdServer) newApplierV3Backend() applierV3 { - return &applierV3backend{s: s} -} - -func (s *EtcdServer) newApplierV3Internal() applierV3Internal { - base := &applierV3backend{s: s} - return base -} - -func (s *EtcdServer) newApplierV3() applierV3 { - return newAuthApplierV3( - s.AuthStore(), - newQuotaApplierV3(s, s.newApplierV3Backend()), - s.lessor, - ) -} - -func (a *applierV3backend) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult { - op := "unknown" - ar := &applyResult{} - defer func(start time.Time) { - success := ar.err == nil || ar.err == mvcc.ErrCompacted - applySec.WithLabelValues(v3Version, op, strconv.FormatBool(success)).Observe(time.Since(start).Seconds()) - warnOfExpensiveRequest(a.s.Logger(), a.s.Cfg.WarningApplyDuration, start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err) - if !success { - warnOfFailedRequest(a.s.Logger(), start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err) - } - }(time.Now()) - - switch { - case r.ClusterVersionSet != nil: // Implemented in 3.5.x - op = "ClusterVersionSet" - a.s.applyV3Internal.ClusterVersionSet(r.ClusterVersionSet, shouldApplyV3) - return ar - case r.ClusterMemberAttrSet != nil: - op = "ClusterMemberAttrSet" // Implemented in 3.5.x - a.s.applyV3Internal.ClusterMemberAttrSet(r.ClusterMemberAttrSet, shouldApplyV3) - return ar - case r.DowngradeInfoSet != nil: - op = "DowngradeInfoSet" // Implemented in 3.5.x - a.s.applyV3Internal.DowngradeInfoSet(r.DowngradeInfoSet, shouldApplyV3) - return ar - } - - if !shouldApplyV3 { - return nil - } - - // call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls - switch { - case r.Range != nil: - op = "Range" - ar.resp, ar.err = a.s.applyV3.Range(context.TODO(), nil, r.Range) - case r.Put != nil: - op = "Put" - ar.resp, ar.trace, ar.err = a.s.applyV3.Put(context.TODO(), nil, r.Put) - case r.DeleteRange != nil: - op = "DeleteRange" - ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange) - case r.Txn != nil: - op = "Txn" - ar.resp, ar.trace, ar.err = a.s.applyV3.Txn(context.TODO(), r.Txn) - case r.Compaction != nil: - op = "Compaction" - ar.resp, ar.physc, ar.trace, ar.err = a.s.applyV3.Compaction(r.Compaction) - case r.LeaseGrant != nil: - op = "LeaseGrant" - ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant) - case r.LeaseRevoke != nil: - op = "LeaseRevoke" - ar.resp, ar.err = a.s.applyV3.LeaseRevoke(r.LeaseRevoke) - case r.LeaseCheckpoint != nil: - op = "LeaseCheckpoint" - ar.resp, ar.err = 
a.s.applyV3.LeaseCheckpoint(r.LeaseCheckpoint) - case r.Alarm != nil: - op = "Alarm" - ar.resp, ar.err = a.s.applyV3.Alarm(r.Alarm) - case r.Authenticate != nil: - op = "Authenticate" - ar.resp, ar.err = a.s.applyV3.Authenticate(r.Authenticate) - case r.AuthEnable != nil: - op = "AuthEnable" - ar.resp, ar.err = a.s.applyV3.AuthEnable() - case r.AuthDisable != nil: - op = "AuthDisable" - ar.resp, ar.err = a.s.applyV3.AuthDisable() - case r.AuthStatus != nil: - ar.resp, ar.err = a.s.applyV3.AuthStatus() - case r.AuthUserAdd != nil: - op = "AuthUserAdd" - ar.resp, ar.err = a.s.applyV3.UserAdd(r.AuthUserAdd) - case r.AuthUserDelete != nil: - op = "AuthUserDelete" - ar.resp, ar.err = a.s.applyV3.UserDelete(r.AuthUserDelete) - case r.AuthUserChangePassword != nil: - op = "AuthUserChangePassword" - ar.resp, ar.err = a.s.applyV3.UserChangePassword(r.AuthUserChangePassword) - case r.AuthUserGrantRole != nil: - op = "AuthUserGrantRole" - ar.resp, ar.err = a.s.applyV3.UserGrantRole(r.AuthUserGrantRole) - case r.AuthUserGet != nil: - op = "AuthUserGet" - ar.resp, ar.err = a.s.applyV3.UserGet(r.AuthUserGet) - case r.AuthUserRevokeRole != nil: - op = "AuthUserRevokeRole" - ar.resp, ar.err = a.s.applyV3.UserRevokeRole(r.AuthUserRevokeRole) - case r.AuthRoleAdd != nil: - op = "AuthRoleAdd" - ar.resp, ar.err = a.s.applyV3.RoleAdd(r.AuthRoleAdd) - case r.AuthRoleGrantPermission != nil: - op = "AuthRoleGrantPermission" - ar.resp, ar.err = a.s.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission) - case r.AuthRoleGet != nil: - op = "AuthRoleGet" - ar.resp, ar.err = a.s.applyV3.RoleGet(r.AuthRoleGet) - case r.AuthRoleRevokePermission != nil: - op = "AuthRoleRevokePermission" - ar.resp, ar.err = a.s.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission) - case r.AuthRoleDelete != nil: - op = "AuthRoleDelete" - ar.resp, ar.err = a.s.applyV3.RoleDelete(r.AuthRoleDelete) - case r.AuthUserList != nil: - op = "AuthUserList" - ar.resp, ar.err = a.s.applyV3.UserList(r.AuthUserList) - case r.AuthRoleList != nil: - op = "AuthRoleList" - ar.resp, ar.err = a.s.applyV3.RoleList(r.AuthRoleList) - default: - a.s.lg.Panic("not implemented apply", zap.Stringer("raft-request", r)) - } - return ar +func (a *applierV3backend) WrapApply(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3, applyFunc ApplyFunc) *applyResult { + return applyFunc(ctx, r, shouldApplyV3) } func (a *applierV3backend) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) { @@ -295,9 +166,7 @@ func (a *applierV3backend) LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.L func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { resp := &pb.AlarmResponse{} - oldCount := len(a.s.alarmStore.Get(ar.Alarm)) - lg := a.s.Logger() switch ar.Action { case pb.AlarmRequest_GET: resp.Alarms = a.s.alarmStore.Get(ar.Alarm) @@ -310,39 +179,12 @@ func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) break } resp.Alarms = append(resp.Alarms, m) - activated := oldCount == 0 && len(a.s.alarmStore.Get(m.Alarm)) == 1 - if !activated { - break - } - - lg.Warn("alarm raised", zap.String("alarm", m.Alarm.String()), zap.String("from", types.ID(m.MemberID).String())) - switch m.Alarm { - case pb.AlarmType_CORRUPT: - a.s.applyV3 = newApplierV3Corrupt(a) - case pb.AlarmType_NOSPACE: - a.s.applyV3 = newApplierV3Capped(a) - default: - lg.Panic("unimplemented alarm activation", zap.String("alarm", fmt.Sprintf("%+v", m))) - } case 
pb.AlarmRequest_DEACTIVATE: m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm) if m == nil { break } resp.Alarms = append(resp.Alarms, m) - deactivated := oldCount > 0 && len(a.s.alarmStore.Get(ar.Alarm)) == 0 - if !deactivated { - break - } - - switch m.Alarm { - case pb.AlarmType_NOSPACE, pb.AlarmType_CORRUPT: - // TODO: check kv hash before deactivating CORRUPT? - lg.Warn("alarm disarmed", zap.String("alarm", m.Alarm.String()), zap.String("from", types.ID(m.MemberID).String())) - a.s.applyV3 = a.s.newApplierV3() - default: - lg.Warn("unimplemented alarm deactivation", zap.String("alarm", fmt.Sprintf("%+v", m))) - } default: return nil, nil } @@ -358,7 +200,7 @@ type applierV3Capped struct { // with Puts so that the number of keys in the store is capped. func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} } -func (a *applierV3Capped) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { +func (a *applierV3Capped) Put(_ context.Context, _ mvcc.TxnWrite, _ *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { return nil, nil, ErrNoSpace } @@ -369,7 +211,7 @@ func (a *applierV3Capped) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnRes return a.applierV3.Txn(ctx, r) } -func (a *applierV3Capped) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { +func (a *applierV3Capped) LeaseGrant(_ *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { return nil, ErrNoSpace } diff --git a/server/etcdserver/apply_auth.go b/server/etcdserver/apply_auth.go index bf043aa731b..f11625e4d6f 100644 --- a/server/etcdserver/apply_auth.go +++ b/server/etcdserver/apply_auth.go @@ -42,7 +42,7 @@ func newAuthApplierV3(as auth.AuthStore, base applierV3, lessor lease.Lessor) *a return &authApplierV3{applierV3: base, as: as, lessor: lessor} } -func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult { +func (aa *authApplierV3) WrapApply(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3, applyFunc ApplyFunc) *applyResult { aa.mu.Lock() defer aa.mu.Unlock() if r.Header != nil { @@ -58,7 +58,7 @@ func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membersh return &applyResult{err: err} } } - ret := aa.applierV3.Apply(r, shouldApplyV3) + ret := aa.applierV3.WrapApply(ctx, r, shouldApplyV3, applyFunc) aa.authInfo.Username = "" aa.authInfo.Revision = 0 return ret diff --git a/server/etcdserver/server.go b/server/etcdserver/server.go index 8f6c75a1c68..0ac4d178d5d 100644 --- a/server/etcdserver/server.go +++ b/server/etcdserver/server.go @@ -250,11 +250,9 @@ type EtcdServer struct { applyV2 ApplierV2 - // applyV3 is the applier with auth and quotas - applyV3 applierV3 - // applyV3Internal is the applier for internal request - applyV3Internal applierV3Internal - applyWait wait.WaitTime + uberApply *uberApplier + + applyWait wait.WaitTime kv mvcc.WatchableKV lessor lease.Lessor @@ -390,10 +388,10 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) { srv.compactor.Run() } - srv.applyV3Internal = srv.newApplierV3Internal() if err = srv.restoreAlarms(); err != nil { return nil, err } + srv.uberApply = newUberApplier(srv) if srv.Cfg.EnableLeaseCheckpoint { // setting checkpointer enables lease checkpoint feature. 
@@ -1071,6 +1069,10 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { ep.appliedi = apply.snapshot.Metadata.Index ep.snapi = ep.appliedi ep.confState = apply.snapshot.Metadata.ConfState + + // As backends and implementations like alarmsStore changed, we need + // to re-bootstrap Appliers. + s.uberApply = newUberApplier(s) } func verifySnapshotIndex(snapshot raftpb.Snapshot, cindex uint64) { @@ -1888,7 +1890,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { removeNeedlessRangeReqs(raftReq.Txn) } applyV3Performed = true - ar = s.applyV3.Apply(&raftReq, shouldApplyV3) + ar = s.uberApply.Apply(&raftReq, shouldApplyV3) } // do not re-apply applied entries. @@ -2292,18 +2294,11 @@ func (s *EtcdServer) Backend() backend.Backend { func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore } func (s *EtcdServer) restoreAlarms() error { - s.applyV3 = s.newApplierV3() as, err := v3alarm.NewAlarmStore(s.lg, schema.NewAlarmBackend(s.lg, s.be)) if err != nil { return err } s.alarmStore = as - if len(as.Get(pb.AlarmType_NOSPACE)) > 0 { - s.applyV3 = newApplierV3Capped(s.applyV3) - } - if len(as.Get(pb.AlarmType_CORRUPT)) > 0 { - s.applyV3 = newApplierV3Corrupt(s.applyV3) - } return nil } diff --git a/server/etcdserver/uber_applier.go b/server/etcdserver/uber_applier.go new file mode 100644 index 00000000000..f8e41dbc02d --- /dev/null +++ b/server/etcdserver/uber_applier.go @@ -0,0 +1,220 @@ +// Copyright 2022 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "context" + "strconv" + "time" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm" + "go.etcd.io/etcd/server/v3/storage/mvcc" + "go.uber.org/zap" +) + +type uberApplier struct { + lg *zap.Logger + + alarmStore *v3alarm.AlarmStore + warningApplyDuration time.Duration + + // This is the applier that is taking in consideration current alarms + applyV3 applierV3 + + // This is the applier used for wrapping when alarms change + applyV3base applierV3 + + // applyV3Internal is the applier for internal requests + // (that seems to bypass wrappings) + // TODO(ptab): Seems artificial and could be part of the regular stack. 
+	applyV3Internal applierV3Internal
+}
+
+func newUberApplier(s *EtcdServer) *uberApplier {
+	applyV3base_ := newApplierV3(s)
+
+	ua := &uberApplier{
+		lg:                   s.lg,
+		alarmStore:           s.alarmStore,
+		warningApplyDuration: s.Cfg.WarningApplyDuration,
+		applyV3:              applyV3base_,
+		applyV3base:          applyV3base_,
+		applyV3Internal:      newApplierV3Internal(s),
+	}
+	ua.RestoreAlarms()
+	return ua
+}
+
+func newApplierV3Backend(s *EtcdServer) applierV3 {
+	return &applierV3backend{s: s}
+}
+
+func newApplierV3Internal(s *EtcdServer) applierV3Internal {
+	base := &applierV3backend{s: s}
+	return base
+}
+
+func newApplierV3(s *EtcdServer) applierV3 {
+	return newAuthApplierV3(
+		s.AuthStore(),
+		newQuotaApplierV3(s, newApplierV3Backend(s)),
+		s.lessor,
+	)
+}
+
+func (a *uberApplier) RestoreAlarms() {
+	noSpaceAlarms := len(a.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0
+	corruptAlarms := len(a.alarmStore.Get(pb.AlarmType_CORRUPT)) > 0
+	a.applyV3 = a.applyV3base
+	if noSpaceAlarms {
+		a.applyV3 = newApplierV3Capped(a.applyV3)
+	}
+	if corruptAlarms {
+		a.applyV3 = newApplierV3Corrupt(a.applyV3)
+	}
+}
+
+func (a *uberApplier) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult {
+	return a.applyV3.WrapApply(context.TODO(), r, shouldApplyV3, a.dispatch)
+}
+
+// dispatch translates the request into a call to the specific applierV3 method that handles it.
+func (a *uberApplier) dispatch(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult {
+	op := "unknown"
+	ar := &applyResult{}
+	defer func(start time.Time) {
+		success := ar.err == nil || ar.err == mvcc.ErrCompacted
+		applySec.WithLabelValues(v3Version, op, strconv.FormatBool(success)).Observe(time.Since(start).Seconds())
+		warnOfExpensiveRequest(a.lg, a.warningApplyDuration, start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err)
+		if !success {
+			warnOfFailedRequest(a.lg, start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err)
+		}
+	}(time.Now())
+
+	switch {
+	case r.ClusterVersionSet != nil: // Implemented in 3.5.x
+		op = "ClusterVersionSet"
+		a.applyV3Internal.ClusterVersionSet(r.ClusterVersionSet, shouldApplyV3)
+		return ar
+	case r.ClusterMemberAttrSet != nil:
+		op = "ClusterMemberAttrSet" // Implemented in 3.5.x
+		a.applyV3Internal.ClusterMemberAttrSet(r.ClusterMemberAttrSet, shouldApplyV3)
+		return ar
+	case r.DowngradeInfoSet != nil:
+		op = "DowngradeInfoSet" // Implemented in 3.5.x
+		a.applyV3Internal.DowngradeInfoSet(r.DowngradeInfoSet, shouldApplyV3)
+		return ar
+	}
+
+	if !shouldApplyV3 {
+		return nil
+	}
+
+	// call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls
+	switch {
+	case r.Range != nil:
+		op = "Range"
+		ar.resp, ar.err = a.applyV3.Range(ctx, nil, r.Range)
+	case r.Put != nil:
+		op = "Put"
+		ar.resp, ar.trace, ar.err = a.applyV3.Put(ctx, nil, r.Put)
+	case r.DeleteRange != nil:
+		op = "DeleteRange"
+		ar.resp, ar.err = a.applyV3.DeleteRange(nil, r.DeleteRange)
+	case r.Txn != nil:
+		op = "Txn"
+		ar.resp, ar.trace, ar.err = a.applyV3.Txn(ctx, r.Txn)
+	case r.Compaction != nil:
+		op = "Compaction"
+		ar.resp, ar.physc, ar.trace, ar.err = a.applyV3.Compaction(r.Compaction)
+	case r.LeaseGrant != nil:
+		op = "LeaseGrant"
+		ar.resp, ar.err = a.applyV3.LeaseGrant(r.LeaseGrant)
+	case r.LeaseRevoke != nil:
+		op = "LeaseRevoke"
+		ar.resp, ar.err = a.applyV3.LeaseRevoke(r.LeaseRevoke)
+	case r.LeaseCheckpoint != nil:
+		op = "LeaseCheckpoint"
+		ar.resp, ar.err = a.applyV3.LeaseCheckpoint(r.LeaseCheckpoint)
+	case r.Alarm != nil:
+		op = "Alarm"
+		ar.resp, ar.err = a.Alarm(r.Alarm)
+	case r.Authenticate != nil:
+ op = "Authenticate" + ar.resp, ar.err = a.applyV3.Authenticate(r.Authenticate) + case r.AuthEnable != nil: + op = "AuthEnable" + ar.resp, ar.err = a.applyV3.AuthEnable() + case r.AuthDisable != nil: + op = "AuthDisable" + ar.resp, ar.err = a.applyV3.AuthDisable() + case r.AuthStatus != nil: + ar.resp, ar.err = a.applyV3.AuthStatus() + case r.AuthUserAdd != nil: + op = "AuthUserAdd" + ar.resp, ar.err = a.applyV3.UserAdd(r.AuthUserAdd) + case r.AuthUserDelete != nil: + op = "AuthUserDelete" + ar.resp, ar.err = a.applyV3.UserDelete(r.AuthUserDelete) + case r.AuthUserChangePassword != nil: + op = "AuthUserChangePassword" + ar.resp, ar.err = a.applyV3.UserChangePassword(r.AuthUserChangePassword) + case r.AuthUserGrantRole != nil: + op = "AuthUserGrantRole" + ar.resp, ar.err = a.applyV3.UserGrantRole(r.AuthUserGrantRole) + case r.AuthUserGet != nil: + op = "AuthUserGet" + ar.resp, ar.err = a.applyV3.UserGet(r.AuthUserGet) + case r.AuthUserRevokeRole != nil: + op = "AuthUserRevokeRole" + ar.resp, ar.err = a.applyV3.UserRevokeRole(r.AuthUserRevokeRole) + case r.AuthRoleAdd != nil: + op = "AuthRoleAdd" + ar.resp, ar.err = a.applyV3.RoleAdd(r.AuthRoleAdd) + case r.AuthRoleGrantPermission != nil: + op = "AuthRoleGrantPermission" + ar.resp, ar.err = a.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission) + case r.AuthRoleGet != nil: + op = "AuthRoleGet" + ar.resp, ar.err = a.applyV3.RoleGet(r.AuthRoleGet) + case r.AuthRoleRevokePermission != nil: + op = "AuthRoleRevokePermission" + ar.resp, ar.err = a.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission) + case r.AuthRoleDelete != nil: + op = "AuthRoleDelete" + ar.resp, ar.err = a.applyV3.RoleDelete(r.AuthRoleDelete) + case r.AuthUserList != nil: + op = "AuthUserList" + ar.resp, ar.err = a.applyV3.UserList(r.AuthUserList) + case r.AuthRoleList != nil: + op = "AuthRoleList" + ar.resp, ar.err = a.applyV3.RoleList(r.AuthRoleList) + default: + a.lg.Panic("not implemented apply", zap.Stringer("raft-request", r)) + } + return ar +} + +func (a *uberApplier) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { + resp, err := a.applyV3.Alarm(ar) + + if ar.Action == pb.AlarmRequest_ACTIVATE || + ar.Action == pb.AlarmRequest_DEACTIVATE { + a.RestoreAlarms() + } + return resp, err +} From f348134edd37d3a3322836f44ba8e7b6141a10e0 Mon Sep 17 00:00:00 2001 From: Piotr Tabor Date: Fri, 8 Apr 2022 09:37:57 +0200 Subject: [PATCH 03/18] Marge applierV3Internal into applierV3 interface --- server/etcdserver/apply.go | 12 +++++------- server/etcdserver/uber_applier.go | 17 +++-------------- 2 files changed, 8 insertions(+), 21 deletions(-) diff --git a/server/etcdserver/apply.go b/server/etcdserver/apply.go index f54099d6baa..1436bc338b2 100644 --- a/server/etcdserver/apply.go +++ b/server/etcdserver/apply.go @@ -48,13 +48,6 @@ type applyResult struct { trace *traceutil.Trace } -// applierV3Internal is the interface for processing internal V3 raft request -type applierV3Internal interface { - ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3) - ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3) - DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3) -} - type ApplyFunc func(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult // applierV3 is the interface for processing V3 raft messages @@ -94,6 +87,11 @@ type applierV3 interface { RoleDelete(ua 
*pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
 UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
 RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
+
+ // processing internal V3 raft request
+ ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3)
+ ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3)
+ DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3)
 }

 type applierV3backend struct {
diff --git a/server/etcdserver/uber_applier.go b/server/etcdserver/uber_applier.go
index f8e41dbc02d..57f10681dec 100644
--- a/server/etcdserver/uber_applier.go
+++ b/server/etcdserver/uber_applier.go
@@ -37,11 +37,6 @@ type uberApplier struct {

 // This is the applier used for wrapping when alarms change
 applyV3base applierV3
-
- // applyV3Internal is the applier for internal requests
- // (that seems to bypass wrappings)
- // TODO(ptab): Seems artificial and could be part of the regular stack.
- applyV3Internal applierV3Internal
 }

 func newUberApplier(s *EtcdServer) *uberApplier {
@@ -53,7 +48,6 @@ func newUberApplier(s *EtcdServer) *uberApplier {
 warningApplyDuration: s.Cfg.WarningApplyDuration,
 applyV3:              applyV3base_,
 applyV3base:          applyV3base_,
- applyV3Internal:      newApplierV3Internal(s),
 }
 ua.RestoreAlarms()
 return ua
@@ -63,11 +57,6 @@ func newApplierV3Backend(s *EtcdServer) applierV3 {
 return &applierV3backend{s: s}
 }

-func newApplierV3Internal(s *EtcdServer) applierV3Internal {
- base := &applierV3backend{s: s}
- return base
-}
-
 func newApplierV3(s *EtcdServer) applierV3 {
 return newAuthApplierV3(
 s.AuthStore(),
@@ -108,15 +97,15 @@ func (a *uberApplier) dispatch(ctx context.Context, r *pb.InternalRaftRequest, s
 switch {
 case r.ClusterVersionSet != nil: // Implemented in 3.5.x
 op = "ClusterVersionSet"
- a.applyV3Internal.ClusterVersionSet(r.ClusterVersionSet, shouldApplyV3)
+ a.applyV3.ClusterVersionSet(r.ClusterVersionSet, shouldApplyV3)
 return ar
 case r.ClusterMemberAttrSet != nil:
 op = "ClusterMemberAttrSet" // Implemented in 3.5.x
- a.applyV3Internal.ClusterMemberAttrSet(r.ClusterMemberAttrSet, shouldApplyV3)
+ a.applyV3.ClusterMemberAttrSet(r.ClusterMemberAttrSet, shouldApplyV3)
 return ar
 case r.DowngradeInfoSet != nil:
 op = "DowngradeInfoSet" // Implemented in 3.5.x
- a.applyV3Internal.DowngradeInfoSet(r.DowngradeInfoSet, shouldApplyV3)
+ a.applyV3.DowngradeInfoSet(r.DowngradeInfoSet, shouldApplyV3)
 return ar
 }

From 651de5a057c00e204aa74407f93e8f2b5e9ebc00 Mon Sep 17 00:00:00 2001
From: Piotr Tabor
Date: Sun, 3 Apr 2022 20:08:30 +0200
Subject: [PATCH 04/18] Rename EtcdServer.Id to EtcdServer.MemberId.

It was misleading and error-prone vs. ClusterId.
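For illustration only (a sketch, not part of the diff): the two identifiers
travel side by side in every response header, which is how a bare Id/ID()
invited mix-ups. Assuming an *EtcdServer s in scope, in the spirit of the
newHeader helper touched later in this series:

    // Both ids appear together; the explicit MemberId() accessor makes
    // it hard to pass the member id where the cluster id is meant.
    header := &pb.ResponseHeader{
        ClusterId: uint64(s.Cluster().ID()), // id of the whole cluster
        MemberId:  uint64(s.MemberId()),     // id of this local member
        Revision:  s.KV().Rev(),
        RaftTerm:  s.Term(),
    }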
--- server/embed/etcd.go | 2 +- server/etcdserver/adapters.go | 2 +- server/etcdserver/api/v3rpc/header.go | 2 +- server/etcdserver/api/v3rpc/interceptor.go | 4 +- server/etcdserver/api/v3rpc/maintenance.go | 2 +- server/etcdserver/api/v3rpc/member.go | 2 +- server/etcdserver/api/v3rpc/quota.go | 4 +- server/etcdserver/api/v3rpc/watch.go | 2 +- server/etcdserver/apply.go | 2 +- server/etcdserver/corrupt.go | 22 +++--- server/etcdserver/server.go | 78 ++++++++++----------- server/etcdserver/server_test.go | 22 +++--- tests/framework/integration/cluster.go | 22 +++--- tests/integration/cluster_test.go | 18 ++--- tests/integration/network_partition_test.go | 2 +- tests/integration/v3_leadership_test.go | 6 +- tests/integration/v3_watch_restore_test.go | 2 +- 17 files changed, 97 insertions(+), 97 deletions(-) diff --git a/server/embed/etcd.go b/server/embed/etcd.go index 663e082d3de..ffd239c7998 100644 --- a/server/embed/etcd.go +++ b/server/embed/etcd.go @@ -275,7 +275,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { e.cfg.logger.Info( "now serving peer/client/metrics", - zap.String("local-member-id", e.Server.ID().String()), + zap.String("local-member-id", e.Server.MemberId().String()), zap.Strings("initial-advertise-peer-urls", e.cfg.getAPURLs()), zap.Strings("listen-peer-urls", e.cfg.getLPURLs()), zap.Strings("advertise-client-urls", e.cfg.getACURLs()), diff --git a/server/etcdserver/adapters.go b/server/etcdserver/adapters.go index f864507bf52..8a95b9488fd 100644 --- a/server/etcdserver/adapters.go +++ b/server/etcdserver/adapters.go @@ -70,7 +70,7 @@ func (s *serverVersionAdapter) GetDowngradeInfo() *serverversion.DowngradeInfo { } func (s *serverVersionAdapter) GetMembersVersions() map[string]*version.Versions { - return getMembersVersions(s.lg, s.cluster, s.id, s.peerRt, s.Cfg.ReqTimeout()) + return getMembersVersions(s.lg, s.cluster, s.MemberId(), s.peerRt, s.Cfg.ReqTimeout()) } func (s *serverVersionAdapter) GetStorageVersion() *semver.Version { diff --git a/server/etcdserver/api/v3rpc/header.go b/server/etcdserver/api/v3rpc/header.go index 112cc922ea1..48886229284 100644 --- a/server/etcdserver/api/v3rpc/header.go +++ b/server/etcdserver/api/v3rpc/header.go @@ -29,7 +29,7 @@ type header struct { func newHeader(s *etcdserver.EtcdServer) header { return header{ clusterID: int64(s.Cluster().ID()), - memberID: int64(s.ID()), + memberID: int64(s.MemberId()), sg: s, rev: func() int64 { return s.KV().Rev() }, } diff --git a/server/etcdserver/api/v3rpc/interceptor.go b/server/etcdserver/api/v3rpc/interceptor.go index 47f75654e21..8057812557c 100644 --- a/server/etcdserver/api/v3rpc/interceptor.go +++ b/server/etcdserver/api/v3rpc/interceptor.go @@ -49,7 +49,7 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { return nil, rpctypes.ErrGRPCNotCapable } - if s.IsMemberExist(s.ID()) && s.IsLearner() && !isRPCSupportedForLearner(req) { + if s.IsMemberExist(s.MemberId()) && s.IsLearner() && !isRPCSupportedForLearner(req) { return nil, rpctypes.ErrGRPCNotSupportedForLearner } @@ -218,7 +218,7 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor return rpctypes.ErrGRPCNotCapable } - if s.IsMemberExist(s.ID()) && s.IsLearner() && info.FullMethod != snapshotMethod { // learner does not support stream RPC except Snapshot + if s.IsMemberExist(s.MemberId()) && s.IsLearner() && info.FullMethod != snapshotMethod { // learner does not support stream RPC except Snapshot return rpctypes.ErrGRPCNotSupportedForLearner } diff --git 
a/server/etcdserver/api/v3rpc/maintenance.go b/server/etcdserver/api/v3rpc/maintenance.go index 6eea479f041..10f03d19f88 100644 --- a/server/etcdserver/api/v3rpc/maintenance.go +++ b/server/etcdserver/api/v3rpc/maintenance.go @@ -250,7 +250,7 @@ func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) ( } func (ms *maintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) { - if ms.rg.ID() != ms.rg.Leader() { + if ms.rg.MemberId() != ms.rg.Leader() { return nil, rpctypes.ErrGRPCNotLeader } diff --git a/server/etcdserver/api/v3rpc/member.go b/server/etcdserver/api/v3rpc/member.go index 54fcc24843d..001eba9d4aa 100644 --- a/server/etcdserver/api/v3rpc/member.go +++ b/server/etcdserver/api/v3rpc/member.go @@ -106,7 +106,7 @@ func (cs *ClusterServer) MemberPromote(ctx context.Context, r *pb.MemberPromoteR } func (cs *ClusterServer) header() *pb.ResponseHeader { - return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.server.Term()} + return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.MemberId()), RaftTerm: cs.server.Term()} } func membersToProtoMembers(membs []*membership.Member) []*pb.Member { diff --git a/server/etcdserver/api/v3rpc/quota.go b/server/etcdserver/api/v3rpc/quota.go index fd41bc13339..9af5fdae723 100644 --- a/server/etcdserver/api/v3rpc/quota.go +++ b/server/etcdserver/api/v3rpc/quota.go @@ -53,7 +53,7 @@ func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error { func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer { return "aKVServer{ NewKVServer(s), - quotaAlarmer{storage.NewBackendQuota(s.Cfg, s.Backend(), "kv"), s, s.ID()}, + quotaAlarmer{storage.NewBackendQuota(s.Cfg, s.Backend(), "kv"), s, s.MemberId()}, } } @@ -86,6 +86,6 @@ func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequ func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { return "aLeaseServer{ NewLeaseServer(s), - quotaAlarmer{storage.NewBackendQuota(s.Cfg, s.Backend(), "lease"), s, s.ID()}, + quotaAlarmer{storage.NewBackendQuota(s.Cfg, s.Backend(), "lease"), s, s.MemberId()}, } } diff --git a/server/etcdserver/api/v3rpc/watch.go b/server/etcdserver/api/v3rpc/watch.go index b8466354b1e..4da07274e2a 100644 --- a/server/etcdserver/api/v3rpc/watch.go +++ b/server/etcdserver/api/v3rpc/watch.go @@ -52,7 +52,7 @@ func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { lg: s.Cfg.Logger, clusterID: int64(s.Cluster().ID()), - memberID: int64(s.ID()), + memberID: int64(s.MemberId()), maxRequestBytes: int(s.Cfg.MaxRequestBytes + grpcOverheadBytes), diff --git a/server/etcdserver/apply.go b/server/etcdserver/apply.go index 1436bc338b2..7057c57aed8 100644 --- a/server/etcdserver/apply.go +++ b/server/etcdserver/apply.go @@ -442,7 +442,7 @@ func removeNeedlessRangeReqs(txn *pb.TxnRequest) { func newHeader(s *EtcdServer) *pb.ResponseHeader { return &pb.ResponseHeader{ ClusterId: uint64(s.Cluster().ID()), - MemberId: uint64(s.ID()), + MemberId: uint64(s.MemberId()), Revision: s.KV().Rev(), RaftTerm: s.Term(), } diff --git a/server/etcdserver/corrupt.go b/server/etcdserver/corrupt.go index 81288d5cbaf..a1f06796c3f 100644 --- a/server/etcdserver/corrupt.go +++ b/server/etcdserver/corrupt.go @@ -45,13 +45,13 @@ func (s *EtcdServer) CheckInitialHashKV() error { lg.Info( "starting initial corruption check", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", 
s.MemberId().String()), zap.Duration("timeout", s.Cfg.ReqTimeout()), ) h, rev, crev, err := s.kv.HashByRev(0) if err != nil { - return fmt.Errorf("%s failed to fetch hash (%v)", s.ID(), err) + return fmt.Errorf("%s failed to fetch hash (%v)", s.MemberId(), err) } peers := s.getPeerHashKVs(rev) mismatch := 0 @@ -59,7 +59,7 @@ func (s *EtcdServer) CheckInitialHashKV() error { if p.resp != nil { peerID := types.ID(p.resp.Header.MemberId) fields := []zap.Field{ - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.Int64("local-member-revision", rev), zap.Int64("local-member-compact-revision", crev), zap.Uint32("local-member-hash", h), @@ -87,7 +87,7 @@ func (s *EtcdServer) CheckInitialHashKV() error { case rpctypes.ErrFutureRev: lg.Warn( "cannot fetch hash from slow remote peer", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.Int64("local-member-revision", rev), zap.Int64("local-member-compact-revision", crev), zap.Uint32("local-member-hash", h), @@ -98,7 +98,7 @@ func (s *EtcdServer) CheckInitialHashKV() error { case rpctypes.ErrCompacted: lg.Warn( "cannot fetch hash from remote peer; local member is behind", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.Int64("local-member-revision", rev), zap.Int64("local-member-compact-revision", crev), zap.Uint32("local-member-hash", h), @@ -110,12 +110,12 @@ func (s *EtcdServer) CheckInitialHashKV() error { } } if mismatch > 0 { - return fmt.Errorf("%s found data inconsistency with peers", s.ID()) + return fmt.Errorf("%s found data inconsistency with peers", s.MemberId()) } lg.Info( "initial corruption checking passed; no corruption", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), ) return nil } @@ -129,7 +129,7 @@ func (s *EtcdServer) monitorKVHash() { lg := s.Logger() lg.Info( "enabled corruption checking", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.Duration("interval", t), ) @@ -195,7 +195,7 @@ func (s *EtcdServer) checkHashKV() error { zap.Int64("compact-revision-2", crev2), zap.Uint32("hash-2", h2), ) - mismatch(uint64(s.ID())) + mismatch(uint64(s.MemberId())) } checkedCount := 0 @@ -262,7 +262,7 @@ func (s *EtcdServer) getPeerHashKVs(rev int64) []*peerHashKVResp { members := s.cluster.Members() peers := make([]peerInfo, 0, len(members)) for _, m := range members { - if m.ID == s.ID() { + if m.ID == s.MemberId() { continue } peers = append(peers, peerInfo{id: m.ID, eps: m.PeerURLs}) @@ -288,7 +288,7 @@ func (s *EtcdServer) getPeerHashKVs(rev int64) []*peerHashKVResp { } lg.Warn( "failed hash kv request", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.Int64("requested-revision", rev), zap.String("remote-peer-endpoint", ep), zap.Error(lastErr), diff --git a/server/etcdserver/server.go b/server/etcdserver/server.go index 0ac4d178d5d..0e76eabc1c5 100644 --- a/server/etcdserver/server.go +++ b/server/etcdserver/server.go @@ -240,7 +240,7 @@ type EtcdServer struct { leaderChanged *notify.Notifier errorc chan error - id types.ID + memberId types.ID attributes membership.Attributes cluster *membership.RaftCluster @@ -323,7 +323,7 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) { v2store: b.storage.st, snapshotter: b.ss, r: *b.raft.newRaftNode(b.ss, b.storage.wal.w, 
b.cluster.cl), - id: b.cluster.nodeID, + memberId: b.cluster.nodeID, attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, cluster: b.cluster.cl, stats: sstats, @@ -461,7 +461,7 @@ func (s *EtcdServer) adjustTicks() { ticks := s.Cfg.ElectionTicks - 1 lg.Info( "started as single-node; fast-forwarding election ticks", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.Int("forward-ticks", ticks), zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)), zap.Int("election-ticks", s.Cfg.ElectionTicks), @@ -500,7 +500,7 @@ func (s *EtcdServer) adjustTicks() { lg.Info( "initialized peer connections; fast-forwarding election ticks", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.Int("forward-ticks", ticks), zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)), zap.Int("election-ticks", s.Cfg.ElectionTicks), @@ -566,7 +566,7 @@ func (s *EtcdServer) start() { if s.ClusterVersion() != nil { lg.Info( "starting etcd server", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.String("local-server-version", version.Version), zap.String("cluster-id", s.Cluster().ID().String()), zap.String("cluster-version", version.Cluster(s.ClusterVersion().String())), @@ -575,7 +575,7 @@ func (s *EtcdServer) start() { } else { lg.Info( "starting etcd server", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.String("local-server-version", version.Version), zap.String("cluster-version", "to_be_decided"), ) @@ -695,7 +695,7 @@ func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error { if s.cluster.IsIDRemoved(types.ID(m.From)) { lg.Warn( "rejected Raft message from removed member", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.String("removed-member-id", types.ID(m.From).String()), ) return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member") @@ -1057,7 +1057,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { lg.Info("adding peers from new cluster configuration") for _, m := range s.cluster.Members() { - if m.ID == s.ID() { + if m.ID == s.MemberId() { continue } s.r.transport.AddPeer(m.ID, m.PeerURLs) @@ -1116,7 +1116,7 @@ func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) { lg := s.Logger() lg.Info( "triggering snapshot", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.Uint64("local-member-applied-index", ep.appliedi), zap.Uint64("local-member-snapshot-index", ep.snapi), zap.Uint64("local-member-snapshot-count", s.Cfg.SnapshotCount), @@ -1137,7 +1137,7 @@ func (s *EtcdServer) hasMultipleVotingMembers() bool { } func (s *EtcdServer) isLeader() bool { - return uint64(s.ID()) == s.Lead() + return uint64(s.MemberId()) == s.Lead() } // MoveLeader transfers the leader to the given transferee. 
@@ -1152,7 +1152,7 @@ func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) er lg := s.Logger() lg.Info( "leadership transfer starting", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.String("current-leader-member-id", types.ID(lead).String()), zap.String("transferee-member-id", types.ID(transferee).String()), ) @@ -1169,7 +1169,7 @@ func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) er // TODO: drain all requests, or drop all messages to the old leader lg.Info( "leadership transfer finished", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.String("old-leader-member-id", types.ID(lead).String()), zap.String("new-leader-member-id", types.ID(transferee).String()), zap.Duration("took", time.Since(now)), @@ -1183,7 +1183,7 @@ func (s *EtcdServer) TransferLeadership() error { if !s.isLeader() { lg.Info( "skipped leadership transfer; local server is not leader", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.String("current-leader-member-id", types.ID(s.Lead()).String()), ) return nil @@ -1192,7 +1192,7 @@ func (s *EtcdServer) TransferLeadership() error { if !s.hasMultipleVotingMembers() { lg.Info( "skipped leadership transfer for single voting member cluster", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.String("current-leader-member-id", types.ID(s.Lead()).String()), ) return nil @@ -1229,7 +1229,7 @@ func (s *EtcdServer) HardStop() { func (s *EtcdServer) Stop() { lg := s.Logger() if err := s.TransferLeadership(); err != nil { - lg.Warn("leadership transfer failed", zap.String("local-member-id", s.ID().String()), zap.Error(err)) + lg.Warn("leadership transfer failed", zap.String("local-member-id", s.MemberId().String()), zap.Error(err)) } s.HardStop() } @@ -1317,17 +1317,17 @@ func (s *EtcdServer) mayAddMember(memb membership.Member) error { if !memb.IsLearner && !s.cluster.IsReadyToAddVotingMember() { lg.Warn( "rejecting member add request; not enough healthy members", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.String("requested-member-add", fmt.Sprintf("%+v", memb)), zap.Error(ErrNotEnoughStartedMembers), ) return ErrNotEnoughStartedMembers } - if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.VotingMembers()) { + if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.MemberId(), s.cluster.VotingMembers()) { lg.Warn( "rejecting member add request; local member has not been connected to all peers, reconfigure breaks active quorum", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.String("requested-member-add", fmt.Sprintf("%+v", memb)), zap.Error(ErrUnhealthy), ) @@ -1446,7 +1446,7 @@ func (s *EtcdServer) mayPromoteMember(id types.ID) error { if !s.cluster.IsReadyToPromoteMember(uint64(id)) { lg.Warn( "rejecting member promote request; not enough healthy members", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.String("requested-member-remove-id", id.String()), zap.Error(ErrNotEnoughStartedMembers), ) @@ -1505,7 +1505,7 @@ func (s *EtcdServer) mayRemoveMember(id types.ID) error { if !s.cluster.IsReadyToRemoveVotingMember(uint64(id)) { lg.Warn( 
"rejecting member remove request; not enough healthy members", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.String("requested-member-remove-id", id.String()), zap.Error(ErrNotEnoughStartedMembers), ) @@ -1513,17 +1513,17 @@ func (s *EtcdServer) mayRemoveMember(id types.ID) error { } // downed member is safe to remove since it's not part of the active quorum - if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() { + if t := s.r.transport.ActiveSince(id); id != s.MemberId() && t.IsZero() { return nil } // protect quorum if some members are down m := s.cluster.VotingMembers() - active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m) + active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.MemberId(), m) if (active - 1) < 1+((len(m)-1)/2) { lg.Warn( "rejecting member remove request; local member has not been connected to all peers, reconfigure breaks active quorum", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.String("requested-member-remove", id.String()), zap.Int("active-peers", active), zap.Error(ErrUnhealthy), @@ -1597,14 +1597,14 @@ func (s *EtcdServer) FirstCommitInTermNotify() <-chan struct{} { // RaftStatusGetter represents etcd server and Raft progress. type RaftStatusGetter interface { - ID() types.ID + MemberId() types.ID Leader() types.ID CommittedIndex() uint64 AppliedIndex() uint64 Term() uint64 } -func (s *EtcdServer) ID() types.ID { return s.id } +func (s *EtcdServer) MemberId() types.ID { return s.memberId } func (s *EtcdServer) Leader() types.ID { return types.ID(s.getLead()) } @@ -1643,7 +1643,7 @@ func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*me resp := x.(*confChangeResponse) lg.Info( "applied a configuration change through raft", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.String("raft-conf-change", cc.Type.String()), zap.String("raft-conf-change-node-id", types.ID(cc.NodeID).String()), ) @@ -1684,7 +1684,7 @@ func (s *EtcdServer) sync(timeout time.Duration) { // or its server is stopped. 
func (s *EtcdServer) publishV3(timeout time.Duration) { req := &membershippb.ClusterMemberAttrSetRequest{ - Member_ID: uint64(s.id), + Member_ID: uint64(s.MemberId()), MemberAttributes: &membershippb.Attributes{ Name: s.attributes.Name, ClientUrls: s.attributes.ClientURLs, @@ -1696,7 +1696,7 @@ func (s *EtcdServer) publishV3(timeout time.Duration) { case <-s.stopping: lg.Warn( "stopped publish because server is stopping", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), zap.Duration("publish-timeout", timeout), ) @@ -1713,7 +1713,7 @@ func (s *EtcdServer) publishV3(timeout time.Duration) { close(s.readych) lg.Info( "published local member to cluster through raft", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), zap.String("cluster-id", s.cluster.ID().String()), zap.Duration("publish-timeout", timeout), @@ -1723,7 +1723,7 @@ func (s *EtcdServer) publishV3(timeout time.Duration) { default: lg.Warn( "failed to publish local member to cluster through raft", - zap.String("local-member-id", s.ID().String()), + zap.String("local-member-id", s.MemberId().String()), zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)), zap.Duration("publish-timeout", timeout), zap.Error(err), @@ -1737,7 +1737,7 @@ func (s *EtcdServer) sendMergedSnap(merged snap.Message) { lg := s.Logger() fields := []zap.Field{ - zap.String("from", s.ID().String()), + zap.String("from", s.MemberId().String()), zap.String("to", types.ID(merged.To).String()), zap.Int64("bytes", merged.TotalSize), zap.String("size", humanize.Bytes(uint64(merged.TotalSize))), @@ -1917,7 +1917,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { s.GoAttach(func() { a := &pb.AlarmRequest{ - MemberID: uint64(s.ID()), + MemberID: uint64(s.MemberId()), Action: pb.AlarmRequest_ACTIVATE, Alarm: pb.AlarmType_NOSPACE, } @@ -1963,13 +1963,13 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con } else { s.cluster.AddMember(&confChangeContext.Member, shouldApplyV3) - if confChangeContext.Member.ID != s.id { + if confChangeContext.Member.ID != s.MemberId() { s.r.transport.AddPeer(confChangeContext.Member.ID, confChangeContext.PeerURLs) } } // update the isLearner metric when this server id is equal to the id in raft member confChange - if confChangeContext.Member.ID == s.id { + if confChangeContext.Member.ID == s.MemberId() { if cc.Type == raftpb.ConfChangeAddLearnerNode { isLearner.Set(1) } else { @@ -1980,7 +1980,7 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con case raftpb.ConfChangeRemoveNode: id := types.ID(cc.NodeID) s.cluster.RemoveMember(id, shouldApplyV3) - if id == s.id { + if id == s.MemberId() { return true, nil } s.r.transport.RemovePeer(id) @@ -1998,7 +1998,7 @@ func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.Con ) } s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes, shouldApplyV3) - if m.ID != s.id { + if m.ID != s.MemberId() { s.r.transport.UpdatePeer(m.ID, m.PeerURLs) } } @@ -2133,7 +2133,7 @@ func (s *EtcdServer) monitorClusterVersions() { return } - if s.Leader() != s.ID() { + if s.Leader() != s.MemberId() { continue } monitor.UpdateClusterVersionIfNeeded() @@ -2268,8 +2268,8 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { switch lead 
{ case types.ID(raft.None): // TODO: return error to specify it happens because the cluster does not have leader now - case s.ID(): - if !isConnectedToQuorumSince(s.r.transport, start, s.ID(), s.cluster.Members()) { + case s.MemberId(): + if !isConnectedToQuorumSince(s.r.transport, start, s.MemberId(), s.cluster.Members()) { return ErrTimeoutDueToConnectionLost } default: diff --git a/server/etcdserver/server_test.go b/server/etcdserver/server_test.go index 3294b1365b5..dcad5c23c9b 100644 --- a/server/etcdserver/server_test.go +++ b/server/etcdserver/server_test.go @@ -610,12 +610,12 @@ func TestApplyConfChangeShouldStop(t *testing.T) { }) lg := zaptest.NewLogger(t) srv := &EtcdServer{ - lgMu: new(sync.RWMutex), - lg: lg, - id: 1, - r: *r, - cluster: cl, - beHooks: serverstorage.NewBackendHooks(lg, nil), + lgMu: new(sync.RWMutex), + lg: lg, + memberId: 1, + r: *r, + cluster: cl, + beHooks: serverstorage.NewBackendHooks(lg, nil), } cc := raftpb.ConfChange{ Type: raftpb.ConfChangeRemoveNode, @@ -658,7 +658,7 @@ func TestApplyConfigChangeUpdatesConsistIndex(t *testing.T) { srv := &EtcdServer{ lgMu: new(sync.RWMutex), lg: lg, - id: 1, + memberId: 1, r: *realisticRaftNode(lg), cluster: cl, w: wait.New(), @@ -739,7 +739,7 @@ func TestApplyMultiConfChangeShouldStop(t *testing.T) { srv := &EtcdServer{ lgMu: new(sync.RWMutex), lg: lg, - id: 2, + memberId: 2, r: *r, cluster: cl, w: wait.New(), @@ -1487,7 +1487,7 @@ func TestPublishV3(t *testing.T) { lg: lg, readych: make(chan struct{}), Cfg: config.ServerConfig{Logger: lg, TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries, MaxRequestBytes: 1000}, - id: 1, + memberId: 1, r: *newRaftNode(raftNodeConfig{lg: lg, Node: n}), attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://a", "http://b"}}, cluster: &membership.RaftCluster{}, @@ -1557,7 +1557,7 @@ func TestPublishV3Retry(t *testing.T) { lg: lg, readych: make(chan struct{}), Cfg: config.ServerConfig{Logger: lg, TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries, MaxRequestBytes: 1000}, - id: 1, + memberId: 1, r: *newRaftNode(raftNodeConfig{lg: lg, Node: n}), w: mockwait.NewNop(), stopping: make(chan struct{}), @@ -1604,7 +1604,7 @@ func TestUpdateVersion(t *testing.T) { srv := &EtcdServer{ lgMu: new(sync.RWMutex), lg: zaptest.NewLogger(t), - id: 1, + memberId: 1, Cfg: config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries}, r: *newRaftNode(raftNodeConfig{lg: zaptest.NewLogger(t), Node: n}), attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://node1.com"}}, diff --git a/tests/framework/integration/cluster.go b/tests/framework/integration/cluster.go index 125e228bc26..a5949970186 100644 --- a/tests/framework/integration/cluster.go +++ b/tests/framework/integration/cluster.go @@ -352,7 +352,7 @@ func (c *Cluster) RemoveMember(t testutil.TB, cc *clientv3.Client, id uint64) er } newMembers := make([]*Member, 0) for _, m := range c.Members { - if uint64(m.Server.ID()) != id { + if uint64(m.Server.MemberId()) != id { newMembers = append(newMembers, m) } else { m.Client.Close() @@ -363,7 +363,7 @@ func (c *Cluster) RemoveMember(t testutil.TB, cc *clientv3.Client, id uint64) er // TODO: remove connection write timeout by selecting on http response closeNotifier // blocking on https://github.com/golang/go/issues/9524 case <-time.After(time.Second + time.Duration(ElectionTicks)*TickDuration + time.Second + rafthttp.ConnWriteTimeout): - t.Fatalf("failed to remove member %s in 
time", m.Server.ID()) + t.Fatalf("failed to remove member %s in time", m.Server.MemberId()) } } } @@ -436,7 +436,7 @@ func (c *Cluster) waitMembersForLeader(ctx context.Context, t testutil.TB, membs possibleLead := make(map[uint64]bool) var lead uint64 for _, m := range membs { - possibleLead[uint64(m.Server.ID())] = true + possibleLead[uint64(m.Server.MemberId())] = true } cc, err := c.ClusterClient() if err != nil { @@ -470,7 +470,7 @@ func (c *Cluster) waitMembersForLeader(ctx context.Context, t testutil.TB, membs } for i, m := range membs { - if uint64(m.Server.ID()) == lead { + if uint64(m.Server.MemberId()) == lead { t.Logf("waitMembersForLeader found leader. Member: %v lead: %x", i, lead) return i } @@ -841,7 +841,7 @@ func (m *Member) ElectionTimeout() time.Duration { return time.Duration(m.Server.Cfg.ElectionTicks*int(m.Server.Cfg.TickMs)) * time.Millisecond } -func (m *Member) ID() types.ID { return m.Server.ID() } +func (m *Member) ID() types.ID { return m.Server.MemberId() } // NewClientV3 creates a new grpc client connection to the member func NewClientV3(m *Member) (*clientv3.Client, error) { @@ -1307,18 +1307,18 @@ func (m *Member) Metric(metricName string, expectLabels ...string) (string, erro // InjectPartition drops connections from m to others, vice versa. func (m *Member) InjectPartition(t testutil.TB, others ...*Member) { for _, other := range others { - m.Server.CutPeer(other.Server.ID()) - other.Server.CutPeer(m.Server.ID()) - t.Logf("network partition injected between: %v <-> %v", m.Server.ID(), other.Server.ID()) + m.Server.CutPeer(other.Server.MemberId()) + other.Server.CutPeer(m.Server.MemberId()) + t.Logf("network partition injected between: %v <-> %v", m.Server.MemberId(), other.Server.MemberId()) } } // RecoverPartition recovers connections from m to others, vice versa. func (m *Member) RecoverPartition(t testutil.TB, others ...*Member) { for _, other := range others { - m.Server.MendPeer(other.Server.ID()) - other.Server.MendPeer(m.Server.ID()) - t.Logf("network partition between: %v <-> %v", m.Server.ID(), other.Server.ID()) + m.Server.MendPeer(other.Server.MemberId()) + other.Server.MendPeer(m.Server.MemberId()) + t.Logf("network partition between: %v <-> %v", m.Server.MemberId(), other.Server.MemberId()) } } diff --git a/tests/integration/cluster_test.go b/tests/integration/cluster_test.go index 5a7ff45920d..50e52cc7821 100644 --- a/tests/integration/cluster_test.go +++ b/tests/integration/cluster_test.go @@ -101,7 +101,7 @@ func testDecreaseClusterSize(t *testing.T, size int) { // TODO: remove the last but one member for i := 0; i < size-1; i++ { - id := c.Members[len(c.Members)-1].Server.ID() + id := c.Members[len(c.Members)-1].Server.MemberId() // may hit second leader election on slow machines if err := c.RemoveMember(t, c.Members[0].Client, uint64(id)); err != nil { if strings.Contains(err.Error(), "no leader") { @@ -179,7 +179,7 @@ func TestAddMemberAfterClusterFullRotation(t *testing.T) { // remove all the previous three members and add in three new members. 
for i := 0; i < 3; i++ { - if err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[1].Server.ID())); err != nil { + if err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[1].Server.MemberId())); err != nil { t.Fatal(err) } c.WaitMembersForLeader(t, c.Members) @@ -200,7 +200,7 @@ func TestIssue2681(t *testing.T) { c := integration.NewCluster(t, &integration.ClusterConfig{Size: 5}) defer c.Terminate(t) - if err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[4].Server.ID())); err != nil { + if err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[4].Server.MemberId())); err != nil { t.Fatal(err) } c.WaitMembersForLeader(t, c.Members) @@ -226,7 +226,7 @@ func testIssue2746(t *testing.T, members int) { clusterMustProgress(t, c.Members) } - if err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[members-1].Server.ID())); err != nil { + if err := c.RemoveMember(t, c.Members[0].Client, uint64(c.Members[members-1].Server.MemberId())); err != nil { t.Fatal(err) } c.WaitMembersForLeader(t, c.Members) @@ -251,7 +251,7 @@ func TestIssue2904(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), integration.RequestTimeout) // the proposal is not committed because member 1 is stopped, but the // proposal is appended to leader'Server raft log. - c.Members[0].Client.MemberRemove(ctx, uint64(c.Members[2].Server.ID())) + c.Members[0].Client.MemberRemove(ctx, uint64(c.Members[2].Server.MemberId())) cancel() // restart member, and expect it to send UpdateAttributes request. @@ -381,7 +381,7 @@ func TestRejectUnhealthyRemove(t *testing.T) { leader := c.WaitLeader(t) // reject remove active member since (3,2)-(1,0) => (2,2) lacks quorum - err := c.RemoveMember(t, c.Members[leader].Client, uint64(c.Members[2].Server.ID())) + err := c.RemoveMember(t, c.Members[leader].Client, uint64(c.Members[2].Server.MemberId())) if err == nil { t.Fatalf("should reject quorum breaking remove: %s", err) } @@ -394,7 +394,7 @@ func TestRejectUnhealthyRemove(t *testing.T) { time.Sleep(time.Duration(integration.ElectionTicks * int(integration.TickDuration))) // permit remove dead member since (3,2) - (0,1) => (3,1) has quorum - if err = c.RemoveMember(t, c.Members[2].Client, uint64(c.Members[0].Server.ID())); err != nil { + if err = c.RemoveMember(t, c.Members[2].Client, uint64(c.Members[0].Server.MemberId())); err != nil { t.Fatalf("should accept removing down member: %s", err) } @@ -405,7 +405,7 @@ func TestRejectUnhealthyRemove(t *testing.T) { time.Sleep((3 * etcdserver.HealthInterval) / 2) // accept remove member since (4,1)-(1,0) => (3,1) has quorum - if err = c.RemoveMember(t, c.Members[1].Client, uint64(c.Members[0].Server.ID())); err != nil { + if err = c.RemoveMember(t, c.Members[1].Client, uint64(c.Members[0].Server.MemberId())); err != nil { t.Fatalf("expected to remove member, got error %v", err) } } @@ -429,7 +429,7 @@ func TestRestartRemoved(t *testing.T) { firstMember.KeepDataDirTerminate = true // 3. 
remove first member, shut down without deleting data
- if err := c.RemoveMember(t, c.Members[1].Client, uint64(firstMember.Server.ID())); err != nil {
+ if err := c.RemoveMember(t, c.Members[1].Client, uint64(firstMember.Server.MemberId())); err != nil {
 t.Fatalf("expected to remove member, got error %v", err)
 }
 c.WaitLeader(t)
diff --git a/tests/integration/network_partition_test.go b/tests/integration/network_partition_test.go
index b5d73a30a85..c3b08f23c5d 100644
--- a/tests/integration/network_partition_test.go
+++ b/tests/integration/network_partition_test.go
@@ -96,7 +96,7 @@ func testNetworkPartition5MembersLeaderInMajority(t *testing.T) error {

 // leader must be hold in majority
 leadIndex2 := clus.WaitMembersForLeader(t, majorityMembers)
- leadID, leadID2 := clus.Members[leadIndex].Server.ID(), majorityMembers[leadIndex2].Server.ID()
+ leadID, leadID2 := clus.Members[leadIndex].Server.MemberId(), majorityMembers[leadIndex2].Server.MemberId()
 if leadID != leadID2 {
 return fmt.Errorf("unexpected leader change from %s, got %s", leadID, leadID2)
 }
diff --git a/tests/integration/v3_leadership_test.go b/tests/integration/v3_leadership_test.go
index 84bd97d3284..519bb0a8fd9 100644
--- a/tests/integration/v3_leadership_test.go
+++ b/tests/integration/v3_leadership_test.go
@@ -37,7 +37,7 @@ func testMoveLeader(t *testing.T, auto bool) {
 defer clus.Terminate(t)

 oldLeadIdx := clus.WaitLeader(t)
- oldLeadID := uint64(clus.Members[oldLeadIdx].Server.ID())
+ oldLeadID := uint64(clus.Members[oldLeadIdx].Server.MemberId())

 // ensure followers go through leader transition while leadership transfer
 idc := make(chan uint64)
@@ -55,7 +55,7 @@
 }
 }

- target := uint64(clus.Members[(oldLeadIdx+1)%3].Server.ID())
+ target := uint64(clus.Members[(oldLeadIdx+1)%3].Server.MemberId())
 if auto {
 err := clus.Members[oldLeadIdx].Server.TransferLeadership()
 if err != nil {
@@ -107,7 +107,7 @@ func TestMoveLeaderError(t *testing.T) {

 oldLeadIdx := clus.WaitLeader(t)
 followerIdx := (oldLeadIdx + 1) % 3

- target := uint64(clus.Members[(oldLeadIdx+2)%3].Server.ID())
+ target := uint64(clus.Members[(oldLeadIdx+2)%3].Server.MemberId())

 mvc := integration.ToGRPC(clus.Client(followerIdx)).Maintenance
 _, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target})
diff --git a/tests/integration/v3_watch_restore_test.go b/tests/integration/v3_watch_restore_test.go
index 2f3271eb96f..39fd91879b7 100644
--- a/tests/integration/v3_watch_restore_test.go
+++ b/tests/integration/v3_watch_restore_test.go
@@ -81,7 +81,7 @@ func TestV3WatchRestoreSnapshotUnsync(t *testing.T) {

 clus.Members[0].InjectPartition(t, clus.Members[1:]...)
 initialLead := clus.WaitMembersForLeader(t, clus.Members[1:])
- t.Logf("elected lead: %v", clus.Members[initialLead].Server.ID())
+ t.Logf("elected lead: %v", clus.Members[initialLead].Server.MemberId())
 t.Logf("sleeping for 2 seconds")
 time.Sleep(2 * time.Second)
 t.Logf("sleeping for 2 seconds DONE")

From b073129d03e3a0d8e786f11cfb2c7f98c9763823 Mon Sep 17 00:00:00 2001
From: Piotr Tabor
Date: Sun, 3 Apr 2022 23:01:08 +0200
Subject: [PATCH 05/18] Applier does not depend on EtcdServer any longer.

All the dependencies are explicitly passed to the UberApplier factory
method.
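As a sketch of the resulting wiring (mirroring the NewUberApplier helper in
the diff below; the fields read off s are carried over from earlier patches
in this series), construction now names every collaborator explicitly
instead of handing over *EtcdServer:

    // Each dependency is visible at the construction site, so the
    // applier stack can be built and tested without a full EtcdServer.
    ua := newUberApplier(s.lg, s.be, s.KV(), s.alarmStore, s.authStore,
        s.lessor, s.cluster, s /* RaftStatusGetter */, s /* SnapshotServer */,
        s.consistIndex, s.Cfg.WarningApplyDuration,
        s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer, s.Cfg.QuotaBackendBytes)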
--- server/etcdserver/api/v3rpc/quota.go | 8 +- server/etcdserver/apply.go | 176 +++++++++++++++++---------- server/etcdserver/server.go | 13 +- server/etcdserver/uber_applier.go | 52 ++++++-- server/etcdserver/v3_server.go | 11 +- server/storage/quota.go | 25 ++-- 6 files changed, 186 insertions(+), 99 deletions(-) diff --git a/server/etcdserver/api/v3rpc/quota.go b/server/etcdserver/api/v3rpc/quota.go index 9af5fdae723..21085188650 100644 --- a/server/etcdserver/api/v3rpc/quota.go +++ b/server/etcdserver/api/v3rpc/quota.go @@ -53,7 +53,7 @@ func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error { func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer { return "aKVServer{ NewKVServer(s), - quotaAlarmer{storage.NewBackendQuota(s.Cfg, s.Backend(), "kv"), s, s.MemberId()}, + quotaAlarmer{newBackendQuota(s, "kv"), s, s.MemberId()}, } } @@ -86,6 +86,10 @@ func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequ func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer { return "aLeaseServer{ NewLeaseServer(s), - quotaAlarmer{storage.NewBackendQuota(s.Cfg, s.Backend(), "lease"), s, s.MemberId()}, + quotaAlarmer{newBackendQuota(s, "lease"), s, s.MemberId()}, } } + +func newBackendQuota(s *etcdserver.EtcdServer, name string) storage.Quota { + return storage.NewBackendQuota(s.Logger(), s.Cfg.QuotaBackendBytes, s.Backend(), name) +} diff --git a/server/etcdserver/apply.go b/server/etcdserver/apply.go index 7057c57aed8..5ccda706fef 100644 --- a/server/etcdserver/apply.go +++ b/server/etcdserver/apply.go @@ -25,9 +25,12 @@ import ( "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver/api" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm" + "go.etcd.io/etcd/server/v3/etcdserver/cindex" "go.etcd.io/etcd/server/v3/etcdserver/version" "go.etcd.io/etcd/server/v3/lease" serverstorage "go.etcd.io/etcd/server/v3/storage" + "go.etcd.io/etcd/server/v3/storage/backend" "go.etcd.io/etcd/server/v3/storage/mvcc" "github.com/gogo/protobuf/proto" @@ -89,13 +92,52 @@ type applierV3 interface { RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) // processing internal V3 raft request + ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3) ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3) DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3) } +type SnapshotServer interface { + ForceSnapshot() +} + type applierV3backend struct { - s *EtcdServer + lg *zap.Logger + kv mvcc.KV + alarmStore *v3alarm.AlarmStore + authStore auth.AuthStore + lessor lease.Lessor + cluster *membership.RaftCluster + raftStatus RaftStatusGetter + snapshotServer SnapshotServer + consistentIndex cindex.ConsistentIndexer + + txnModeWriteWithSharedBuffer bool +} + +func newApplierV3Backend( + lg *zap.Logger, + kv mvcc.KV, + alarmStore *v3alarm.AlarmStore, + authStore auth.AuthStore, + lessor lease.Lessor, + cluster *membership.RaftCluster, + raftStatus RaftStatusGetter, + snapshotServer SnapshotServer, + consistentIndex cindex.ConsistentIndexer, + txnModeWriteWithSharedBuffer bool) applierV3 { + return &applierV3backend{ + lg: lg, + kv: kv, + alarmStore: alarmStore, + authStore: authStore, + lessor: lessor, + cluster: cluster, + raftStatus: raftStatus, + snapshotServer: snapshotServer, + consistentIndex: consistentIndex, + txnModeWriteWithSharedBuffer: 
txnModeWriteWithSharedBuffer} } func (a *applierV3backend) WrapApply(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3, applyFunc ApplyFunc) *applyResult { @@ -103,63 +145,63 @@ func (a *applierV3backend) WrapApply(ctx context.Context, r *pb.InternalRaftRequ } func (a *applierV3backend) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) { - return Put(ctx, a.s.Logger(), a.s.lessor, a.s.KV(), txn, p) + return Put(ctx, a.lg, a.lessor, a.kv, txn, p) } func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - return DeleteRange(a.s.KV(), txn, dr) + return DeleteRange(a.kv, txn, dr) } func (a *applierV3backend) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { - return Range(ctx, a.s.Logger(), a.s.KV(), txn, r) + return Range(ctx, a.lg, a.kv, txn, r) } func (a *applierV3backend) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { - return Txn(ctx, a.s.Logger(), rt, a.s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer, a.s.KV(), a.s.lessor) + return Txn(ctx, a.lg, rt, a.txnModeWriteWithSharedBuffer, a.kv, a.lessor) } func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) { resp := &pb.CompactionResponse{} resp.Header = &pb.ResponseHeader{} trace := traceutil.New("compact", - a.s.Logger(), + a.lg, traceutil.Field{Key: "revision", Value: compaction.Revision}, ) - ch, err := a.s.KV().Compact(trace, compaction.Revision) + ch, err := a.kv.Compact(trace, compaction.Revision) if err != nil { return nil, ch, nil, err } // get the current revision. which key to get is not important. 
- rr, _ := a.s.KV().Range(context.TODO(), []byte("compaction"), nil, mvcc.RangeOptions{}) + rr, _ := a.kv.Range(context.TODO(), []byte("compaction"), nil, mvcc.RangeOptions{}) resp.Header.Revision = rr.Rev return resp, ch, trace, err } func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - l, err := a.s.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL) + l, err := a.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL) resp := &pb.LeaseGrantResponse{} if err == nil { resp.ID = int64(l.ID) resp.TTL = l.TTL() - resp.Header = newHeader(a.s) + resp.Header = a.newHeader() } return resp, err } func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - err := a.s.lessor.Revoke(lease.LeaseID(lc.ID)) - return &pb.LeaseRevokeResponse{Header: newHeader(a.s)}, err + err := a.lessor.Revoke(lease.LeaseID(lc.ID)) + return &pb.LeaseRevokeResponse{Header: a.newHeader()}, err } func (a *applierV3backend) LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error) { for _, c := range lc.Checkpoints { - err := a.s.lessor.Checkpoint(lease.LeaseID(c.ID), c.Remaining_TTL) + err := a.lessor.Checkpoint(lease.LeaseID(c.ID), c.Remaining_TTL) if err != nil { - return &pb.LeaseCheckpointResponse{Header: newHeader(a.s)}, err + return &pb.LeaseCheckpointResponse{Header: a.newHeader()}, err } } - return &pb.LeaseCheckpointResponse{Header: newHeader(a.s)}, nil + return &pb.LeaseCheckpointResponse{Header: a.newHeader()}, nil } func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { @@ -167,18 +209,18 @@ func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) switch ar.Action { case pb.AlarmRequest_GET: - resp.Alarms = a.s.alarmStore.Get(ar.Alarm) + resp.Alarms = a.alarmStore.Get(ar.Alarm) case pb.AlarmRequest_ACTIVATE: if ar.Alarm == pb.AlarmType_NONE { break } - m := a.s.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm) + m := a.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm) if m == nil { break } resp.Alarms = append(resp.Alarms, m) case pb.AlarmRequest_DEACTIVATE: - m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm) + m := a.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm) if m == nil { break } @@ -214,156 +256,156 @@ func (a *applierV3Capped) LeaseGrant(_ *pb.LeaseGrantRequest) (*pb.LeaseGrantRes } func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) { - err := a.s.AuthStore().AuthEnable() + err := a.authStore.AuthEnable() if err != nil { return nil, err } - return &pb.AuthEnableResponse{Header: newHeader(a.s)}, nil + return &pb.AuthEnableResponse{Header: a.newHeader()}, nil } func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) { - a.s.AuthStore().AuthDisable() - return &pb.AuthDisableResponse{Header: newHeader(a.s)}, nil + a.authStore.AuthDisable() + return &pb.AuthDisableResponse{Header: a.newHeader()}, nil } func (a *applierV3backend) AuthStatus() (*pb.AuthStatusResponse, error) { - enabled := a.s.AuthStore().IsAuthEnabled() - authRevision := a.s.AuthStore().Revision() - return &pb.AuthStatusResponse{Header: newHeader(a.s), Enabled: enabled, AuthRevision: authRevision}, nil + enabled := a.authStore.IsAuthEnabled() + authRevision := a.authStore.Revision() + return &pb.AuthStatusResponse{Header: a.newHeader(), Enabled: enabled, AuthRevision: authRevision}, nil } func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) { - ctx := 
context.WithValue(context.WithValue(a.s.ctx, auth.AuthenticateParamIndex{}, a.s.consistIndex.ConsistentIndex()), auth.AuthenticateParamSimpleTokenPrefix{}, r.SimpleToken) - resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password) + ctx := context.WithValue(context.WithValue(context.Background(), auth.AuthenticateParamIndex{}, a.consistentIndex.ConsistentIndex()), auth.AuthenticateParamSimpleTokenPrefix{}, r.SimpleToken) + resp, err := a.authStore.Authenticate(ctx, r.Name, r.Password) if resp != nil { - resp.Header = newHeader(a.s) + resp.Header = a.newHeader() } return resp, err } func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - resp, err := a.s.AuthStore().UserAdd(r) + resp, err := a.authStore.UserAdd(r) if resp != nil { - resp.Header = newHeader(a.s) + resp.Header = a.newHeader() } return resp, err } func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - resp, err := a.s.AuthStore().UserDelete(r) + resp, err := a.authStore.UserDelete(r) if resp != nil { - resp.Header = newHeader(a.s) + resp.Header = a.newHeader() } return resp, err } func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - resp, err := a.s.AuthStore().UserChangePassword(r) + resp, err := a.authStore.UserChangePassword(r) if resp != nil { - resp.Header = newHeader(a.s) + resp.Header = a.newHeader() } return resp, err } func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - resp, err := a.s.AuthStore().UserGrantRole(r) + resp, err := a.authStore.UserGrantRole(r) if resp != nil { - resp.Header = newHeader(a.s) + resp.Header = a.newHeader() } return resp, err } func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - resp, err := a.s.AuthStore().UserGet(r) + resp, err := a.authStore.UserGet(r) if resp != nil { - resp.Header = newHeader(a.s) + resp.Header = a.newHeader() } return resp, err } func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - resp, err := a.s.AuthStore().UserRevokeRole(r) + resp, err := a.authStore.UserRevokeRole(r) if resp != nil { - resp.Header = newHeader(a.s) + resp.Header = a.newHeader() } return resp, err } func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - resp, err := a.s.AuthStore().RoleAdd(r) + resp, err := a.authStore.RoleAdd(r) if resp != nil { - resp.Header = newHeader(a.s) + resp.Header = a.newHeader() } return resp, err } func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - resp, err := a.s.AuthStore().RoleGrantPermission(r) + resp, err := a.authStore.RoleGrantPermission(r) if resp != nil { - resp.Header = newHeader(a.s) + resp.Header = a.newHeader() } return resp, err } func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - resp, err := a.s.AuthStore().RoleGet(r) + resp, err := a.authStore.RoleGet(r) if resp != nil { - resp.Header = newHeader(a.s) + resp.Header = a.newHeader() } return resp, err } func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - resp, err := a.s.AuthStore().RoleRevokePermission(r) + resp, err := a.authStore.RoleRevokePermission(r) if resp != nil { - resp.Header = 
newHeader(a.s) + resp.Header = a.newHeader() } return resp, err } func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - resp, err := a.s.AuthStore().RoleDelete(r) + resp, err := a.authStore.RoleDelete(r) if resp != nil { - resp.Header = newHeader(a.s) + resp.Header = a.newHeader() } return resp, err } func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - resp, err := a.s.AuthStore().UserList(r) + resp, err := a.authStore.UserList(r) if resp != nil { - resp.Header = newHeader(a.s) + resp.Header = a.newHeader() } return resp, err } func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - resp, err := a.s.AuthStore().RoleList(r) + resp, err := a.authStore.RoleList(r) if resp != nil { - resp.Header = newHeader(a.s) + resp.Header = a.newHeader() } return resp, err } func (a *applierV3backend) ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3) { - prevVersion := a.s.Cluster().Version() + prevVersion := a.cluster.Version() newVersion := semver.Must(semver.NewVersion(r.Ver)) - a.s.cluster.SetVersion(newVersion, api.UpdateCapability, shouldApplyV3) + a.cluster.SetVersion(newVersion, api.UpdateCapability, shouldApplyV3) // Force snapshot after cluster version downgrade. if prevVersion != nil && newVersion.LessThan(*prevVersion) { - lg := a.s.Logger() + lg := a.lg if lg != nil { lg.Info("Cluster version downgrade detected, forcing snapshot", zap.String("prev-cluster-version", prevVersion.String()), zap.String("new-cluster-version", newVersion.String()), ) } - a.s.forceSnapshot = true + a.snapshotServer.ForceSnapshot() } } func (a *applierV3backend) ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3) { - a.s.cluster.UpdateAttributes( + a.cluster.UpdateAttributes( types.ID(r.Member_ID), membership.Attributes{ Name: r.MemberAttributes.Name, @@ -378,7 +420,7 @@ func (a *applierV3backend) DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequ if r.Enabled { d = version.DowngradeInfo{Enabled: true, TargetVersion: r.Ver} } - a.s.cluster.SetDowngradeInfo(&d, shouldApplyV3) + a.cluster.SetDowngradeInfo(&d, shouldApplyV3) } type quotaApplierV3 struct { @@ -386,8 +428,8 @@ type quotaApplierV3 struct { q serverstorage.Quota } -func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 { - return "aApplierV3{app, serverstorage.NewBackendQuota(s.Cfg, s.Backend(), "v3-applier")} +func newQuotaApplierV3(lg *zap.Logger, quotaBackendBytesCfg int64, be backend.Backend, app applierV3) applierV3 { + return "aApplierV3{app, serverstorage.NewBackendQuota(lg, quotaBackendBytesCfg, be, "v3-applier")} } func (a *quotaApplierV3) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { @@ -439,11 +481,11 @@ func removeNeedlessRangeReqs(txn *pb.TxnRequest) { txn.Failure = f(txn.Failure) } -func newHeader(s *EtcdServer) *pb.ResponseHeader { +func (a *applierV3backend) newHeader() *pb.ResponseHeader { return &pb.ResponseHeader{ - ClusterId: uint64(s.Cluster().ID()), - MemberId: uint64(s.MemberId()), - Revision: s.KV().Rev(), - RaftTerm: s.Term(), + ClusterId: uint64(a.cluster.ID()), + MemberId: uint64(a.raftStatus.MemberId()), + Revision: a.kv.Rev(), + RaftTerm: a.raftStatus.Term(), } } diff --git a/server/etcdserver/server.go b/server/etcdserver/server.go index 0e76eabc1c5..770654323c0 100644 --- a/server/etcdserver/server.go +++ 
b/server/etcdserver/server.go @@ -391,7 +391,7 @@ func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) { if err = srv.restoreAlarms(); err != nil { return nil, err } - srv.uberApply = newUberApplier(srv) + srv.uberApply = srv.NewUberApplier() if srv.Cfg.EnableLeaseCheckpoint { // setting checkpointer enables lease checkpoint feature. @@ -1072,7 +1072,12 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { // As backends and implementations like alarmsStore changed, we need // to re-bootstrap Appliers. - s.uberApply = newUberApplier(s) + s.uberApply = s.NewUberApplier() +} + +func (s *EtcdServer) NewUberApplier() *uberApplier { + return newUberApplier(s.lg, s.be, s.KV(), s.alarmStore, s.authStore, s.lessor, s.cluster, s, s, s.consistIndex, + s.Cfg.WarningApplyDuration, s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer, s.Cfg.QuotaBackendBytes) } func verifySnapshotIndex(snapshot raftpb.Snapshot, cindex uint64) { @@ -1109,6 +1114,10 @@ func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) { } } +func (s *EtcdServer) ForceSnapshot() { + s.forceSnapshot = true +} + func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) { if !s.shouldSnapshot(ep) { return diff --git a/server/etcdserver/uber_applier.go b/server/etcdserver/uber_applier.go index 57f10681dec..c3d6d968621 100644 --- a/server/etcdserver/uber_applier.go +++ b/server/etcdserver/uber_applier.go @@ -20,8 +20,12 @@ import ( "time" pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm" + "go.etcd.io/etcd/server/v3/etcdserver/cindex" + "go.etcd.io/etcd/server/v3/lease" + "go.etcd.io/etcd/server/v3/storage/backend" "go.etcd.io/etcd/server/v3/storage/mvcc" "go.uber.org/zap" ) @@ -39,13 +43,26 @@ type uberApplier struct { applyV3base applierV3 } -func newUberApplier(s *EtcdServer) *uberApplier { - applyV3base_ := newApplierV3(s) +func newUberApplier( + lg *zap.Logger, + be backend.Backend, + kv mvcc.KV, + alarmStore *v3alarm.AlarmStore, + authStore auth.AuthStore, + lessor lease.Lessor, + cluster *membership.RaftCluster, + raftStatus RaftStatusGetter, + snapshotServer SnapshotServer, + consistentIndex cindex.ConsistentIndexer, + warningApplyDuration time.Duration, + txnModeWriteWithSharedBuffer bool, + quotaBackendBytesCfg int64) *uberApplier { + applyV3base_ := newApplierV3(lg, be, kv, alarmStore, authStore, lessor, cluster, raftStatus, snapshotServer, consistentIndex, txnModeWriteWithSharedBuffer, quotaBackendBytesCfg) ua := &uberApplier{ - lg: s.lg, - alarmStore: s.alarmStore, - warningApplyDuration: s.Cfg.WarningApplyDuration, + lg: lg, + alarmStore: alarmStore, + warningApplyDuration: warningApplyDuration, applyV3: applyV3base_, applyV3base: applyV3base_, } @@ -53,15 +70,24 @@ func newUberApplier(s *EtcdServer) *uberApplier { return ua } -func newApplierV3Backend(s *EtcdServer) applierV3 { - return &applierV3backend{s: s} -} - -func newApplierV3(s *EtcdServer) applierV3 { +func newApplierV3( + lg *zap.Logger, + be backend.Backend, + kv mvcc.KV, + alarmStore *v3alarm.AlarmStore, + authStore auth.AuthStore, + lessor lease.Lessor, + cluster *membership.RaftCluster, + raftStatus RaftStatusGetter, + snapshotServer SnapshotServer, + consistentIndex cindex.ConsistentIndexer, + txnModeWriteWithSharedBuffer bool, + quotaBackendBytesCfg int64) applierV3 { + applierBackend := newApplierV3Backend(lg, kv, alarmStore, authStore, lessor, cluster, raftStatus, snapshotServer, 
consistentIndex, txnModeWriteWithSharedBuffer) return newAuthApplierV3( - s.AuthStore(), - newQuotaApplierV3(s, newApplierV3Backend(s)), - s.lessor, + authStore, + newQuotaApplierV3(lg, quotaBackendBytesCfg, be, applierBackend), + lessor, ) } diff --git a/server/etcdserver/v3_server.go b/server/etcdserver/v3_server.go index 3dad86a63b4..bc35c2eff7f 100644 --- a/server/etcdserver/v3_server.go +++ b/server/etcdserver/v3_server.go @@ -392,6 +392,15 @@ func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveR return nil, ErrCanceled } +func (s *EtcdServer) newHeader() *pb.ResponseHeader { + return &pb.ResponseHeader{ + ClusterId: uint64(s.cluster.ID()), + MemberId: uint64(s.MemberId()), + Revision: s.KV().Rev(), + RaftTerm: s.Term(), + } +} + // LeaseLeases is really ListLeases !??? func (s *EtcdServer) LeaseLeases(_ context.Context, _ *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) { ls := s.lessor.Leases() @@ -399,7 +408,7 @@ func (s *EtcdServer) LeaseLeases(_ context.Context, _ *pb.LeaseLeasesRequest) (* for i := range ls { lss[i] = &pb.LeaseStatus{ID: int64(ls[i].ID)} } - return &pb.LeaseLeasesResponse{Header: newHeader(s), Leases: lss}, nil + return &pb.LeaseLeasesResponse{Header: s.newHeader(), Leases: lss}, nil } func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) { diff --git a/server/storage/quota.go b/server/storage/quota.go index 46b3506537b..f24ca987cb4 100644 --- a/server/storage/quota.go +++ b/server/storage/quota.go @@ -18,7 +18,6 @@ import ( "sync" pb "go.etcd.io/etcd/api/v3/etcdserverpb" - "go.etcd.io/etcd/server/v3/config" "go.etcd.io/etcd/server/v3/storage/backend" humanize "github.com/dustin/go-humanize" @@ -73,23 +72,21 @@ var ( ) // NewBackendQuota creates a quota layer with the given storage limit. 
-func NewBackendQuota(cfg config.ServerConfig, be backend.Backend, name string) Quota { - lg := cfg.Logger - quotaBackendBytes.Set(float64(cfg.QuotaBackendBytes)) - - if cfg.QuotaBackendBytes < 0 { +func NewBackendQuota(lg *zap.Logger, quotaBackendBytesCfg int64, be backend.Backend, name string) Quota { + quotaBackendBytes.Set(float64(quotaBackendBytesCfg)) + if quotaBackendBytesCfg < 0 { // disable quotas if negative quotaLogOnce.Do(func() { lg.Info( "disabled backend quota", zap.String("quota-name", name), - zap.Int64("quota-size-bytes", cfg.QuotaBackendBytes), + zap.Int64("quota-size-bytes", quotaBackendBytesCfg), ) }) return &passthroughQuota{} } - if cfg.QuotaBackendBytes == 0 { + if quotaBackendBytesCfg == 0 { // use default size if no quota size given quotaLogOnce.Do(func() { if lg != nil { @@ -106,12 +103,12 @@ func NewBackendQuota(cfg config.ServerConfig, be backend.Backend, name string) Q } quotaLogOnce.Do(func() { - if cfg.QuotaBackendBytes > MaxQuotaBytes { + if quotaBackendBytesCfg > MaxQuotaBytes { lg.Warn( "quota exceeds the maximum value", zap.String("quota-name", name), - zap.Int64("quota-size-bytes", cfg.QuotaBackendBytes), - zap.String("quota-size", humanize.Bytes(uint64(cfg.QuotaBackendBytes))), + zap.Int64("quota-size-bytes", quotaBackendBytesCfg), + zap.String("quota-size", humanize.Bytes(uint64(quotaBackendBytesCfg))), zap.Int64("quota-maximum-size-bytes", MaxQuotaBytes), zap.String("quota-maximum-size", maxQuotaSize), ) @@ -119,11 +116,11 @@ func NewBackendQuota(cfg config.ServerConfig, be backend.Backend, name string) Q lg.Info( "enabled backend quota", zap.String("quota-name", name), - zap.Int64("quota-size-bytes", cfg.QuotaBackendBytes), - zap.String("quota-size", humanize.Bytes(uint64(cfg.QuotaBackendBytes))), + zap.Int64("quota-size-bytes", quotaBackendBytesCfg), + zap.String("quota-size", humanize.Bytes(uint64(quotaBackendBytesCfg))), ) }) - return &BackendQuota{be, cfg.QuotaBackendBytes} + return &BackendQuota{be, quotaBackendBytesCfg} } func (b *BackendQuota) Available(v interface{}) bool { From e2ae9b1d135ece9ef53a2c508ea94a54fdd410e0 Mon Sep 17 00:00:00 2001 From: Piotr Tabor Date: Sun, 3 Apr 2022 23:29:26 +0200 Subject: [PATCH 06/18] Move server/etcdserver/txn.go to new package: server/etcdserver/txn --- server/etcdserver/apply.go | 9 ++++---- server/etcdserver/errors.go | 4 +++- server/etcdserver/{ => txn}/txn.go | 37 ++++++++++++++++++++++++++++-- server/etcdserver/v3_server.go | 37 ++++-------------------------- 4 files changed, 48 insertions(+), 39 deletions(-) rename server/etcdserver/{ => txn}/txn.go (96%) diff --git a/server/etcdserver/apply.go b/server/etcdserver/apply.go index 5ccda706fef..ac2d33815a0 100644 --- a/server/etcdserver/apply.go +++ b/server/etcdserver/apply.go @@ -27,6 +27,7 @@ import ( "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm" "go.etcd.io/etcd/server/v3/etcdserver/cindex" + mvcc_txn "go.etcd.io/etcd/server/v3/etcdserver/txn" "go.etcd.io/etcd/server/v3/etcdserver/version" "go.etcd.io/etcd/server/v3/lease" serverstorage "go.etcd.io/etcd/server/v3/storage" @@ -145,19 +146,19 @@ func (a *applierV3backend) WrapApply(ctx context.Context, r *pb.InternalRaftRequ } func (a *applierV3backend) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) { - return Put(ctx, a.lg, a.lessor, a.kv, txn, p) + return mvcc_txn.Put(ctx, a.lg, a.lessor, a.kv, txn, p) } func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr 
*pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - return DeleteRange(a.kv, txn, dr) + return mvcc_txn.DeleteRange(a.kv, txn, dr) } func (a *applierV3backend) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { - return Range(ctx, a.lg, a.kv, txn, r) + return mvcc_txn.Range(ctx, a.lg, a.kv, txn, r) } func (a *applierV3backend) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { - return Txn(ctx, a.lg, rt, a.txnModeWriteWithSharedBuffer, a.kv, a.lessor) + return mvcc_txn.Txn(ctx, a.lg, rt, a.txnModeWriteWithSharedBuffer, a.kv, a.lessor) } func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) { diff --git a/server/etcdserver/errors.go b/server/etcdserver/errors.go index e28f49c1778..aa74739d803 100644 --- a/server/etcdserver/errors.go +++ b/server/etcdserver/errors.go @@ -17,6 +17,8 @@ package etcdserver import ( "errors" "fmt" + + "go.etcd.io/etcd/server/v3/etcdserver/txn" ) var ( @@ -37,11 +39,11 @@ var ( ErrNoSpace = errors.New("etcdserver: no space") ErrTooManyRequests = errors.New("etcdserver: too many requests") ErrUnhealthy = errors.New("etcdserver: unhealthy cluster") - ErrKeyNotFound = errors.New("etcdserver: key not found") ErrCorrupt = errors.New("etcdserver: corrupt cluster") ErrBadLeaderTransferee = errors.New("etcdserver: bad leader transferee") ErrClusterVersionUnavailable = errors.New("etcdserver: cluster version not found during downgrade") ErrWrongDowngradeVersionFormat = errors.New("etcdserver: wrong downgrade target version format") + ErrKeyNotFound = txn.ErrKeyNotFound ) type DiscoveryError struct { diff --git a/server/etcdserver/txn.go b/server/etcdserver/txn/txn.go similarity index 96% rename from server/etcdserver/txn.go rename to server/etcdserver/txn/txn.go index 747017093d9..3a5c2debf43 100644 --- a/server/etcdserver/txn.go +++ b/server/etcdserver/txn/txn.go @@ -12,11 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -package etcdserver +package txn import ( "bytes" "context" + "errors" "sort" pb "go.etcd.io/etcd/api/v3/etcdserverpb" @@ -27,6 +28,10 @@ import ( "go.uber.org/zap" ) +var ( + ErrKeyNotFound = errors.New("etcdserver: key not found") +) + func Put(ctx context.Context, lg *zap.Logger, lessor lease.Lessor, kv mvcc.KV, txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) { resp = &pb.PutResponse{} resp.Header = &pb.ResponseHeader{} @@ -221,7 +226,7 @@ func Txn(ctx context.Context, lg *zap.Logger, rt *pb.TxnRequest, txnModeWriteWit trace = traceutil.New("transaction", lg) ctx = context.WithValue(ctx, traceutil.TraceKey, trace) } - isWrite := !isTxnReadonly(rt) + isWrite := !IsTxnReadonly(rt) // When the transaction contains write operations, we use ReadTx instead of // ConcurrentReadTx to avoid extra overhead of copying buffer. 
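// A minimal sketch (not code from this patch) of the buffer-mode choice the
// comment above describes. It assumes the mvcc.ReadTxMode values already
// defined in server/storage/mvcc; the package and helper names here are
// illustrative only.
package txnsketch

import "go.etcd.io/etcd/server/v3/storage/mvcc"

// readTxMode picks how Txn acquires its read view: a write transaction
// configured for the shared buffer reads through it, skipping the buffer
// copy a concurrent read txn would perform; all other transactions use
// ConcurrentReadTxMode so they do not block in-flight writes.
func readTxMode(isWrite, writeWithSharedBuffer bool) mvcc.ReadTxMode {
	if isWrite && writeWithSharedBuffer {
		return mvcc.SharedBufReadTxMode
	}
	return mvcc.ConcurrentReadTxMode
}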
@@ -595,3 +600,31 @@ func compareKV(c *pb.Compare, ckv mvccpb.KeyValue) bool { } return true } + +func IsTxnSerializable(r *pb.TxnRequest) bool { + for _, u := range r.Success { + if r := u.GetRequestRange(); r == nil || !r.Serializable { + return false + } + } + for _, u := range r.Failure { + if r := u.GetRequestRange(); r == nil || !r.Serializable { + return false + } + } + return true +} + +func IsTxnReadonly(r *pb.TxnRequest) bool { + for _, u := range r.Success { + if r := u.GetRequestRange(); r == nil { + return false + } + } + for _, u := range r.Failure { + if r := u.GetRequestRange(); r == nil { + return false + } + } + return true +} diff --git a/server/etcdserver/v3_server.go b/server/etcdserver/v3_server.go index bc35c2eff7f..583ffa387ab 100644 --- a/server/etcdserver/v3_server.go +++ b/server/etcdserver/v3_server.go @@ -28,6 +28,7 @@ import ( "go.etcd.io/etcd/raft/v3" "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + "go.etcd.io/etcd/server/v3/etcdserver/txn" "go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease/leasehttp" "go.etcd.io/etcd/server/v3/storage/mvcc" @@ -128,7 +129,7 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd) } - get := func() { resp, err = Range(ctx, s.Logger(), s.KV(), nil, r) } + get := func() { resp, err = txn.Range(ctx, s.Logger(), s.KV(), nil, r) } if serr := s.doSerialize(ctx, chk, get); serr != nil { err = serr return nil, err @@ -154,13 +155,13 @@ func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) } func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - if isTxnReadonly(r) { + if txn.IsTxnReadonly(r) { trace := traceutil.New("transaction", s.Logger(), traceutil.Field{Key: "read_only", Value: true}, ) ctx = context.WithValue(ctx, traceutil.TraceKey, trace) - if !isTxnSerializable(r) { + if !txn.IsTxnSerializable(r) { err := s.linearizableReadNotify(ctx) trace.Step("agreement among raft nodes before linearized reading") if err != nil { @@ -179,7 +180,7 @@ func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse }(time.Now()) get := func() { - resp, _, err = Txn(ctx, s.Logger(), r, s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer, s.KV(), s.lessor) + resp, _, err = txn.Txn(ctx, s.Logger(), r, s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer, s.KV(), s.lessor) } if serr := s.doSerialize(ctx, chk, get); serr != nil { return nil, serr @@ -195,34 +196,6 @@ func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse return resp.(*pb.TxnResponse), nil } -func isTxnSerializable(r *pb.TxnRequest) bool { - for _, u := range r.Success { - if r := u.GetRequestRange(); r == nil || !r.Serializable { - return false - } - } - for _, u := range r.Failure { - if r := u.GetRequestRange(); r == nil || !r.Serializable { - return false - } - } - return true -} - -func isTxnReadonly(r *pb.TxnRequest) bool { - for _, u := range r.Success { - if r := u.GetRequestRange(); r == nil { - return false - } - } - for _, u := range r.Failure { - if r := u.GetRequestRange(); r == nil { - return false - } - } - return true -} - func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) { startTime := time.Now() result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r}) From fc6a6c3c27cd82b8e3ae7db8fc0802e108a8b18b Mon Sep 17 00:00:00 2001 From: Piotr Tabor 
Date: Fri, 13 May 2022 13:34:23 +0200 Subject: [PATCH 07/18] Move etcdserver/errors.go to a separate package to avoid cyclic dependencies. --- server/etcdmain/etcd.go | 5 +- server/etcdserver/api/etcdhttp/peer.go | 3 +- server/etcdserver/api/etcdhttp/utils.go | 6 +-- server/etcdserver/api/v3rpc/maintenance.go | 3 +- server/etcdserver/api/v3rpc/util.go | 44 +++++++-------- server/etcdserver/apply.go | 13 ++--- server/etcdserver/apply_v2.go | 3 +- server/etcdserver/bootstrap.go | 3 +- server/etcdserver/cluster_util.go | 9 ++-- server/etcdserver/corrupt.go | 15 +++--- server/etcdserver/{ => etcderrors}/errors.go | 8 ++- server/etcdserver/server.go | 57 ++++++++++---------- server/etcdserver/server_test.go | 21 ++++---- server/etcdserver/txn/txn.go | 10 ++-- server/etcdserver/v2_server.go | 5 +- server/etcdserver/v3_server.go | 39 +++++++------- tests/functional/tester/stresser_key.go | 6 +-- 17 files changed, 127 insertions(+), 123 deletions(-) rename server/etcdserver/{ => etcderrors}/errors.go (94%) diff --git a/server/etcdmain/etcd.go b/server/etcdmain/etcd.go index 40be3188df0..2999fe13a6c 100644 --- a/server/etcdmain/etcd.go +++ b/server/etcdmain/etcd.go @@ -25,9 +25,8 @@ import ( "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/pkg/v3/osutil" "go.etcd.io/etcd/server/v3/embed" - "go.etcd.io/etcd/server/v3/etcdserver" "go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery" - + "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" "go.uber.org/zap" "google.golang.org/grpc" ) @@ -127,7 +126,7 @@ func startEtcdOrProxyV2(args []string) { } if err != nil { - if derr, ok := err.(*etcdserver.DiscoveryError); ok { + if derr, ok := err.(*etcderrors.DiscoveryError); ok { switch derr.Err { case v2discovery.ErrDuplicateID: lg.Warn( diff --git a/server/etcdserver/api/etcdhttp/peer.go b/server/etcdserver/api/etcdhttp/peer.go index a64058b7159..3def6bae3fe 100644 --- a/server/etcdserver/api/etcdhttp/peer.go +++ b/server/etcdserver/api/etcdhttp/peer.go @@ -26,6 +26,7 @@ import ( "go.etcd.io/etcd/server/v3/etcdserver/api" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" + "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" "go.etcd.io/etcd/server/v3/lease/leasehttp" "go.uber.org/zap" @@ -142,7 +143,7 @@ func (h *peerMemberPromoteHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ http.Error(w, err.Error(), http.StatusNotFound) case membership.ErrMemberNotLearner: http.Error(w, err.Error(), http.StatusPreconditionFailed) - case etcdserver.ErrLearnerNotReady: + case etcderrors.ErrLearnerNotReady: http.Error(w, err.Error(), http.StatusPreconditionFailed) default: writeError(h.lg, w, r, err) diff --git a/server/etcdserver/api/etcdhttp/utils.go b/server/etcdserver/api/etcdhttp/utils.go index 09957bfc1aa..a1e8176db90 100644 --- a/server/etcdserver/api/etcdhttp/utils.go +++ b/server/etcdserver/api/etcdhttp/utils.go @@ -17,7 +17,7 @@ package etcdhttp import ( "net/http" - "go.etcd.io/etcd/server/v3/etcdserver" + "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" httptypes "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/types" "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" "go.uber.org/zap" @@ -57,8 +57,8 @@ func writeError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err erro default: switch err { - case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, - etcdserver.ErrUnhealthy: + case etcderrors.ErrTimeoutDueToLeaderFail, etcderrors.ErrTimeoutDueToConnectionLost, 
etcderrors.ErrNotEnoughStartedMembers, + etcderrors.ErrUnhealthy: if lg != nil { lg.Warn( "v2 response error", diff --git a/server/etcdserver/api/v3rpc/maintenance.go b/server/etcdserver/api/v3rpc/maintenance.go index 10f03d19f88..ed5ce2b095c 100644 --- a/server/etcdserver/api/v3rpc/maintenance.go +++ b/server/etcdserver/api/v3rpc/maintenance.go @@ -27,6 +27,7 @@ import ( "go.etcd.io/etcd/raft/v3" "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver" + "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" serverversion "go.etcd.io/etcd/server/v3/etcdserver/version" "go.etcd.io/etcd/server/v3/storage/backend" "go.etcd.io/etcd/server/v3/storage/mvcc" @@ -241,7 +242,7 @@ func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) ( resp.StorageVersion = storageVersion.String() } if resp.Leader == raft.None { - resp.Errors = append(resp.Errors, etcdserver.ErrNoLeader.Error()) + resp.Errors = append(resp.Errors, etcderrors.ErrNoLeader.Error()) } for _, a := range ms.a.Alarms() { resp.Errors = append(resp.Errors, a.String()) diff --git a/server/etcdserver/api/v3rpc/util.go b/server/etcdserver/api/v3rpc/util.go index a4ddbe58e38..3d04d8cc40f 100644 --- a/server/etcdserver/api/v3rpc/util.go +++ b/server/etcdserver/api/v3rpc/util.go @@ -21,8 +21,8 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/server/v3/auth" - "go.etcd.io/etcd/server/v3/etcdserver" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" "go.etcd.io/etcd/server/v3/etcdserver/version" "go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/storage/mvcc" @@ -38,30 +38,30 @@ var toGRPCErrorMap = map[error]error{ membership.ErrPeerURLexists: rpctypes.ErrGRPCPeerURLExist, membership.ErrMemberNotLearner: rpctypes.ErrGRPCMemberNotLearner, membership.ErrTooManyLearners: rpctypes.ErrGRPCTooManyLearners, - etcdserver.ErrNotEnoughStartedMembers: rpctypes.ErrMemberNotEnoughStarted, - etcdserver.ErrLearnerNotReady: rpctypes.ErrGRPCLearnerNotReady, + etcderrors.ErrNotEnoughStartedMembers: rpctypes.ErrMemberNotEnoughStarted, + etcderrors.ErrLearnerNotReady: rpctypes.ErrGRPCLearnerNotReady, mvcc.ErrCompacted: rpctypes.ErrGRPCCompacted, mvcc.ErrFutureRev: rpctypes.ErrGRPCFutureRev, - etcdserver.ErrRequestTooLarge: rpctypes.ErrGRPCRequestTooLarge, - etcdserver.ErrNoSpace: rpctypes.ErrGRPCNoSpace, - etcdserver.ErrTooManyRequests: rpctypes.ErrTooManyRequests, - - etcdserver.ErrNoLeader: rpctypes.ErrGRPCNoLeader, - etcdserver.ErrNotLeader: rpctypes.ErrGRPCNotLeader, - etcdserver.ErrLeaderChanged: rpctypes.ErrGRPCLeaderChanged, - etcdserver.ErrStopped: rpctypes.ErrGRPCStopped, - etcdserver.ErrTimeout: rpctypes.ErrGRPCTimeout, - etcdserver.ErrTimeoutDueToLeaderFail: rpctypes.ErrGRPCTimeoutDueToLeaderFail, - etcdserver.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost, - etcdserver.ErrTimeoutWaitAppliedIndex: rpctypes.ErrGRPCTimeoutWaitAppliedIndex, - etcdserver.ErrUnhealthy: rpctypes.ErrGRPCUnhealthy, - etcdserver.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound, - etcdserver.ErrCorrupt: rpctypes.ErrGRPCCorrupt, - etcdserver.ErrBadLeaderTransferee: rpctypes.ErrGRPCBadLeaderTransferee, - - etcdserver.ErrClusterVersionUnavailable: rpctypes.ErrGRPCClusterVersionUnavailable, - etcdserver.ErrWrongDowngradeVersionFormat: rpctypes.ErrGRPCWrongDowngradeVersionFormat, + etcderrors.ErrRequestTooLarge: rpctypes.ErrGRPCRequestTooLarge, + etcderrors.ErrNoSpace: rpctypes.ErrGRPCNoSpace, + 
etcderrors.ErrTooManyRequests: rpctypes.ErrTooManyRequests, + + etcderrors.ErrNoLeader: rpctypes.ErrGRPCNoLeader, + etcderrors.ErrNotLeader: rpctypes.ErrGRPCNotLeader, + etcderrors.ErrLeaderChanged: rpctypes.ErrGRPCLeaderChanged, + etcderrors.ErrStopped: rpctypes.ErrGRPCStopped, + etcderrors.ErrTimeout: rpctypes.ErrGRPCTimeout, + etcderrors.ErrTimeoutDueToLeaderFail: rpctypes.ErrGRPCTimeoutDueToLeaderFail, + etcderrors.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost, + etcderrors.ErrTimeoutWaitAppliedIndex: rpctypes.ErrGRPCTimeoutWaitAppliedIndex, + etcderrors.ErrUnhealthy: rpctypes.ErrGRPCUnhealthy, + etcderrors.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound, + etcderrors.ErrCorrupt: rpctypes.ErrGRPCCorrupt, + etcderrors.ErrBadLeaderTransferee: rpctypes.ErrGRPCBadLeaderTransferee, + + etcderrors.ErrClusterVersionUnavailable: rpctypes.ErrGRPCClusterVersionUnavailable, + etcderrors.ErrWrongDowngradeVersionFormat: rpctypes.ErrGRPCWrongDowngradeVersionFormat, version.ErrInvalidDowngradeTargetVersion: rpctypes.ErrGRPCInvalidDowngradeTargetVersion, version.ErrDowngradeInProcess: rpctypes.ErrGRPCDowngradeInProcess, version.ErrNoInflightDowngrade: rpctypes.ErrGRPCNoInflightDowngrade, diff --git a/server/etcdserver/apply.go b/server/etcdserver/apply.go index ac2d33815a0..0dbec139bed 100644 --- a/server/etcdserver/apply.go +++ b/server/etcdserver/apply.go @@ -27,6 +27,7 @@ import ( "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm" "go.etcd.io/etcd/server/v3/etcdserver/cindex" + "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" mvcc_txn "go.etcd.io/etcd/server/v3/etcdserver/txn" "go.etcd.io/etcd/server/v3/etcdserver/version" "go.etcd.io/etcd/server/v3/lease" @@ -242,18 +243,18 @@ type applierV3Capped struct { func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} } func (a *applierV3Capped) Put(_ context.Context, _ mvcc.TxnWrite, _ *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { - return nil, nil, ErrNoSpace + return nil, nil, etcderrors.ErrNoSpace } func (a *applierV3Capped) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { if a.q.Cost(r) > 0 { - return nil, nil, ErrNoSpace + return nil, nil, etcderrors.ErrNoSpace } return a.applierV3.Txn(ctx, r) } func (a *applierV3Capped) LeaseGrant(_ *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - return nil, ErrNoSpace + return nil, etcderrors.ErrNoSpace } func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) { @@ -437,7 +438,7 @@ func (a *quotaApplierV3) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRe ok := a.q.Available(p) resp, trace, err := a.applierV3.Put(ctx, txn, p) if err == nil && !ok { - err = ErrNoSpace + err = etcderrors.ErrNoSpace } return resp, trace, err } @@ -446,7 +447,7 @@ func (a *quotaApplierV3) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnRes ok := a.q.Available(rt) resp, trace, err := a.applierV3.Txn(ctx, rt) if err == nil && !ok { - err = ErrNoSpace + err = etcderrors.ErrNoSpace } return resp, trace, err } @@ -455,7 +456,7 @@ func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantRes ok := a.q.Available(lc) resp, err := a.applierV3.LeaseGrant(lc) if err == nil && !ok { - err = ErrNoSpace + err = etcderrors.ErrNoSpace } return resp, err } diff --git a/server/etcdserver/apply_v2.go b/server/etcdserver/apply_v2.go index c44e66d9761..538d1dc43a2 100644 --- a/server/etcdserver/apply_v2.go +++ 
b/server/etcdserver/apply_v2.go @@ -27,6 +27,7 @@ import ( "go.etcd.io/etcd/server/v3/etcdserver/api" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" + "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" "go.uber.org/zap" ) @@ -146,7 +147,7 @@ func (s *EtcdServer) applyV2Request(r *RequestV2, shouldApplyV3 membership.Shoul return s.applyV2.Sync(r) default: // This should never be reached, but just in case: - return Response{Err: ErrUnknownMethod} + return Response{Err: etcderrors.ErrUnknownMethod} } } diff --git a/server/etcdserver/bootstrap.go b/server/etcdserver/bootstrap.go index 43605be5e62..f12670f15be 100644 --- a/server/etcdserver/bootstrap.go +++ b/server/etcdserver/bootstrap.go @@ -25,6 +25,7 @@ import ( "github.com/coreos/go-semver/semver" "github.com/dustin/go-humanize" + "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" "go.uber.org/zap" "go.etcd.io/etcd/api/v3/etcdserverpb" @@ -337,7 +338,7 @@ func bootstrapNewClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper) (* str, err = v3discovery.JoinCluster(cfg.Logger, &cfg.DiscoveryCfg, m.ID, cfg.InitialPeerURLsMap.String()) } if err != nil { - return nil, &DiscoveryError{Op: "join", Err: err} + return nil, &etcderrors.DiscoveryError{Op: "join", Err: err} } var urlsmap types.URLsMap urlsmap, err = types.NewURLsMap(str) diff --git a/server/etcdserver/cluster_util.go b/server/etcdserver/cluster_util.go index d4feaf9a22a..d96e8548bab 100644 --- a/server/etcdserver/cluster_util.go +++ b/server/etcdserver/cluster_util.go @@ -28,6 +28,7 @@ import ( "go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" "github.com/coreos/go-semver/semver" "go.uber.org/zap" @@ -304,12 +305,12 @@ func promoteMemberHTTP(ctx context.Context, url string, id uint64, peerRt http.R } if resp.StatusCode == http.StatusRequestTimeout { - return nil, ErrTimeout + return nil, etcderrors.ErrTimeout } if resp.StatusCode == http.StatusPreconditionFailed { // both ErrMemberNotLearner and ErrLearnerNotReady have same http status code - if strings.Contains(string(b), ErrLearnerNotReady.Error()) { - return nil, ErrLearnerNotReady + if strings.Contains(string(b), etcderrors.ErrLearnerNotReady.Error()) { + return nil, etcderrors.ErrLearnerNotReady } if strings.Contains(string(b), membership.ErrMemberNotLearner.Error()) { return nil, membership.ErrMemberNotLearner @@ -408,7 +409,7 @@ func convertToClusterVersion(v string) (*semver.Version, error) { // allow input version format Major.Minor ver, err = semver.NewVersion(v + ".0") if err != nil { - return nil, ErrWrongDowngradeVersionFormat + return nil, etcderrors.ErrWrongDowngradeVersionFormat } } // cluster version only keeps major.minor, remove patch version diff --git a/server/etcdserver/corrupt.go b/server/etcdserver/corrupt.go index a1f06796c3f..9c9bd24f826 100644 --- a/server/etcdserver/corrupt.go +++ b/server/etcdserver/corrupt.go @@ -28,6 +28,7 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/pkg/v3/traceutil" + "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" "go.etcd.io/etcd/server/v3/storage/mvcc" "go.uber.org/zap" @@ -310,31 +311,31 @@ type applierV3Corrupt struct { func newApplierV3Corrupt(a applierV3) *applierV3Corrupt { return &applierV3Corrupt{a} } func (a *applierV3Corrupt) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, 
error) { - return nil, nil, ErrCorrupt + return nil, nil, etcderrors.ErrCorrupt } func (a *applierV3Corrupt) Range(ctx context.Context, txn mvcc.TxnRead, p *pb.RangeRequest) (*pb.RangeResponse, error) { - return nil, ErrCorrupt + return nil, etcderrors.ErrCorrupt } func (a *applierV3Corrupt) DeleteRange(txn mvcc.TxnWrite, p *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - return nil, ErrCorrupt + return nil, etcderrors.ErrCorrupt } func (a *applierV3Corrupt) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { - return nil, nil, ErrCorrupt + return nil, nil, etcderrors.ErrCorrupt } func (a *applierV3Corrupt) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) { - return nil, nil, nil, ErrCorrupt + return nil, nil, nil, etcderrors.ErrCorrupt } func (a *applierV3Corrupt) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - return nil, ErrCorrupt + return nil, etcderrors.ErrCorrupt } func (a *applierV3Corrupt) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - return nil, ErrCorrupt + return nil, etcderrors.ErrCorrupt } const PeerHashKVPath = "/members/hashkv" diff --git a/server/etcdserver/errors.go b/server/etcdserver/etcderrors/errors.go similarity index 94% rename from server/etcdserver/errors.go rename to server/etcdserver/etcderrors/errors.go index aa74739d803..80102311811 100644 --- a/server/etcdserver/errors.go +++ b/server/etcdserver/etcderrors/errors.go @@ -1,4 +1,4 @@ -// Copyright 2015 The etcd Authors +// Copyright 2022 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,13 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -package etcdserver +package etcderrors import ( "errors" "fmt" - - "go.etcd.io/etcd/server/v3/etcdserver/txn" ) var ( @@ -43,7 +41,7 @@ var ( ErrBadLeaderTransferee = errors.New("etcdserver: bad leader transferee") ErrClusterVersionUnavailable = errors.New("etcdserver: cluster version not found during downgrade") ErrWrongDowngradeVersionFormat = errors.New("etcdserver: wrong downgrade target version format") - ErrKeyNotFound = txn.ErrKeyNotFound + ErrKeyNotFound = errors.New("etcdserver: key not found") ) type DiscoveryError struct { diff --git a/server/etcdserver/server.go b/server/etcdserver/server.go index 770654323c0..986298271f8 100644 --- a/server/etcdserver/server.go +++ b/server/etcdserver/server.go @@ -34,6 +34,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.etcd.io/etcd/pkg/v3/notify" "go.etcd.io/etcd/server/v3/config" + "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" "go.uber.org/zap" pb "go.etcd.io/etcd/api/v3/etcdserverpb" @@ -1152,7 +1153,7 @@ func (s *EtcdServer) isLeader() bool { // MoveLeader transfers the leader to the given transferee. 
func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) error { if !s.cluster.IsMemberExist(types.ID(transferee)) || s.cluster.Member(types.ID(transferee)).IsLearner { - return ErrBadLeaderTransferee + return etcderrors.ErrBadLeaderTransferee } now := time.Now() @@ -1170,7 +1171,7 @@ func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) er for s.Lead() != transferee { select { case <-ctx.Done(): // time out - return ErrTimeoutLeaderTransfer + return etcderrors.ErrTimeoutLeaderTransfer case <-time.After(interval): } } @@ -1209,7 +1210,7 @@ func (s *EtcdServer) TransferLeadership() error { transferee, ok := longestConnected(s.r.transport, s.cluster.VotingMemberIDs()) if !ok { - return ErrUnhealthy + return etcderrors.ErrUnhealthy } tm := s.Cfg.ReqTimeout() @@ -1328,9 +1329,9 @@ func (s *EtcdServer) mayAddMember(memb membership.Member) error { "rejecting member add request; not enough healthy members", zap.String("local-member-id", s.MemberId().String()), zap.String("requested-member-add", fmt.Sprintf("%+v", memb)), - zap.Error(ErrNotEnoughStartedMembers), + zap.Error(etcderrors.ErrNotEnoughStartedMembers), ) - return ErrNotEnoughStartedMembers + return etcderrors.ErrNotEnoughStartedMembers } if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.MemberId(), s.cluster.VotingMembers()) { @@ -1338,9 +1339,9 @@ func (s *EtcdServer) mayAddMember(memb membership.Member) error { "rejecting member add request; local member has not been connected to all peers, reconfigure breaks active quorum", zap.String("local-member-id", s.MemberId().String()), zap.String("requested-member-add", fmt.Sprintf("%+v", memb)), - zap.Error(ErrUnhealthy), + zap.Error(etcderrors.ErrUnhealthy), ) - return ErrUnhealthy + return etcderrors.ErrUnhealthy } return nil @@ -1373,7 +1374,7 @@ func (s *EtcdServer) PromoteMember(ctx context.Context, id uint64) ([]*membershi learnerPromoteSucceed.Inc() return resp, nil } - if err != ErrNotLeader { + if err != etcderrors.ErrNotLeader { learnerPromoteFailed.WithLabelValues(err.Error()).Inc() return resp, err } @@ -1392,16 +1393,16 @@ func (s *EtcdServer) PromoteMember(ctx context.Context, id uint64) ([]*membershi return resp, nil } // If member promotion failed, return early. Otherwise keep retry. 
- if err == ErrLearnerNotReady || err == membership.ErrIDNotFound || err == membership.ErrMemberNotLearner { + if err == etcderrors.ErrLearnerNotReady || err == membership.ErrIDNotFound || err == membership.ErrMemberNotLearner { return nil, err } } } if cctx.Err() == context.DeadlineExceeded { - return nil, ErrTimeout + return nil, etcderrors.ErrTimeout } - return nil, ErrCanceled + return nil, etcderrors.ErrCanceled } // promoteMember checks whether the to-be-promoted learner node is ready before sending the promote @@ -1457,9 +1458,9 @@ func (s *EtcdServer) mayPromoteMember(id types.ID) error { "rejecting member promote request; not enough healthy members", zap.String("local-member-id", s.MemberId().String()), zap.String("requested-member-remove-id", id.String()), - zap.Error(ErrNotEnoughStartedMembers), + zap.Error(etcderrors.ErrNotEnoughStartedMembers), ) - return ErrNotEnoughStartedMembers + return etcderrors.ErrNotEnoughStartedMembers } return nil @@ -1473,7 +1474,7 @@ func (s *EtcdServer) isLearnerReady(id uint64) error { // leader's raftStatus.Progress is not nil if rs.Progress == nil { - return ErrNotLeader + return etcderrors.ErrNotLeader } var learnerMatch uint64 @@ -1492,7 +1493,7 @@ func (s *EtcdServer) isLearnerReady(id uint64) error { leaderMatch := rs.Progress[leaderID].Match // the learner's Match not caught up with leader yet if float64(learnerMatch) < float64(leaderMatch)*readyPercent { - return ErrLearnerNotReady + return etcderrors.ErrLearnerNotReady } } @@ -1516,9 +1517,9 @@ func (s *EtcdServer) mayRemoveMember(id types.ID) error { "rejecting member remove request; not enough healthy members", zap.String("local-member-id", s.MemberId().String()), zap.String("requested-member-remove-id", id.String()), - zap.Error(ErrNotEnoughStartedMembers), + zap.Error(etcderrors.ErrNotEnoughStartedMembers), ) - return ErrNotEnoughStartedMembers + return etcderrors.ErrNotEnoughStartedMembers } // downed member is safe to remove since it's not part of the active quorum @@ -1535,9 +1536,9 @@ func (s *EtcdServer) mayRemoveMember(id types.ID) error { zap.String("local-member-id", s.MemberId().String()), zap.String("requested-member-remove", id.String()), zap.Int("active-peers", active), - zap.Error(ErrUnhealthy), + zap.Error(etcderrors.ErrUnhealthy), ) - return ErrUnhealthy + return etcderrors.ErrUnhealthy } return nil @@ -1663,7 +1664,7 @@ func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*me return nil, s.parseProposeCtxErr(ctx.Err(), start) case <-s.stopping: - return nil, ErrStopped + return nil, etcderrors.ErrStopped } } @@ -1911,7 +1912,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { return } - if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 { + if ar.err != etcderrors.ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 { s.w.Trigger(id, ar) return } @@ -2194,7 +2195,7 @@ func (s *EtcdServer) updateClusterVersionV2(ver string) { lg.Info("cluster version is updated", zap.String("cluster-version", version.Cluster(ver))) return - case ErrStopped: + case etcderrors.ErrStopped: lg.Warn("aborting cluster version update; server is stopped", zap.Error(err)) return @@ -2230,7 +2231,7 @@ func (s *EtcdServer) updateClusterVersionV3(ver string) { lg.Info("cluster version is updated", zap.String("cluster-version", version.Cluster(ver))) return - case ErrStopped: + case etcderrors.ErrStopped: lg.Warn("aborting cluster version update; server is stopped", zap.Error(err)) return @@ -2263,7 +2264,7 @@ func (s *EtcdServer) 
monitorDowngrade() { func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { switch err { case context.Canceled: - return ErrCanceled + return etcderrors.ErrCanceled case context.DeadlineExceeded: s.leadTimeMu.RLock() @@ -2271,7 +2272,7 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { s.leadTimeMu.RUnlock() prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond) if start.After(prevLeadLost) && start.Before(curLeadElected) { - return ErrTimeoutDueToLeaderFail + return etcderrors.ErrTimeoutDueToLeaderFail } lead := types.ID(s.getLead()) switch lead { @@ -2279,14 +2280,14 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { // TODO: return error to specify it happens because the cluster does not have leader now case s.MemberId(): if !isConnectedToQuorumSince(s.r.transport, start, s.MemberId(), s.cluster.Members()) { - return ErrTimeoutDueToConnectionLost + return etcderrors.ErrTimeoutDueToConnectionLost } default: if !isConnectedSince(s.r.transport, start, lead) { - return ErrTimeoutDueToConnectionLost + return etcderrors.ErrTimeoutDueToConnectionLost } } - return ErrTimeout + return etcderrors.ErrTimeout default: return err diff --git a/server/etcdserver/server_test.go b/server/etcdserver/server_test.go index dcad5c23c9b..effbef437e6 100644 --- a/server/etcdserver/server_test.go +++ b/server/etcdserver/server_test.go @@ -47,6 +47,7 @@ import ( "go.etcd.io/etcd/server/v3/etcdserver/api/snap" "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" "go.etcd.io/etcd/server/v3/etcdserver/cindex" + "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" "go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/mock/mockstorage" "go.etcd.io/etcd/server/v3/mock/mockstore" @@ -95,7 +96,7 @@ func TestDoLocalAction(t *testing.T) { }, { pb.Request{Method: "BADMETHOD", ID: 1}, - Response{}, ErrUnknownMethod, []testutil.Action{}, + Response{}, etcderrors.ErrUnknownMethod, []testutil.Action{}, }, } for i, tt := range tests { @@ -461,7 +462,7 @@ func TestApplyRequest(t *testing.T) { // Unknown method - error { pb.Request{Method: "BADMETHOD", ID: 1}, - Response{Err: ErrUnknownMethod}, + Response{Err: etcderrors.ErrUnknownMethod}, []testutil.Action{}, }, } @@ -828,8 +829,8 @@ func TestDoProposalCancelled(t *testing.T) { cancel() _, err := srv.Do(ctx, pb.Request{Method: "PUT"}) - if err != ErrCanceled { - t.Fatalf("err = %v, want %v", err, ErrCanceled) + if err != etcderrors.ErrCanceled { + t.Fatalf("err = %v, want %v", err, etcderrors.ErrCanceled) } w := []testutil.Action{{Name: "Register"}, {Name: "Trigger"}} if !reflect.DeepEqual(wt.Action(), w) { @@ -851,8 +852,8 @@ func TestDoProposalTimeout(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 0) _, err := srv.Do(ctx, pb.Request{Method: "PUT"}) cancel() - if err != ErrTimeout { - t.Fatalf("err = %v, want %v", err, ErrTimeout) + if err != etcderrors.ErrTimeout { + t.Fatalf("err = %v, want %v", err, etcderrors.ErrTimeout) } } @@ -870,8 +871,8 @@ func TestDoProposalStopped(t *testing.T) { srv.stopping = make(chan struct{}) close(srv.stopping) _, err := srv.Do(context.Background(), pb.Request{Method: "PUT", ID: 1}) - if err != ErrStopped { - t.Errorf("err = %v, want %v", err, ErrStopped) + if err != etcderrors.ErrStopped { + t.Errorf("err = %v, want %v", err, etcderrors.ErrStopped) } } @@ -1942,14 +1943,14 @@ func TestWaitAppliedIndex(t *testing.T) { action: func(s *EtcdServer) { s.stopping <- struct{}{} 
}, - ExpectedError: ErrStopped, + ExpectedError: etcderrors.ErrStopped, }, { name: "Timed out waiting for the applied index", appliedIndex: 10, committedIndex: 12, action: nil, - ExpectedError: ErrTimeoutWaitAppliedIndex, + ExpectedError: etcderrors.ErrTimeoutWaitAppliedIndex, }, } for _, tc := range cases { diff --git a/server/etcdserver/txn/txn.go b/server/etcdserver/txn/txn.go index 3a5c2debf43..ab33ebd9b57 100644 --- a/server/etcdserver/txn/txn.go +++ b/server/etcdserver/txn/txn.go @@ -17,21 +17,17 @@ package txn import ( "bytes" "context" - "errors" "sort" pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/pkg/v3/traceutil" + "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" "go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/storage/mvcc" "go.uber.org/zap" ) -var ( - ErrKeyNotFound = errors.New("etcdserver: key not found") -) - func Put(ctx context.Context, lg *zap.Logger, lessor lease.Lessor, kv mvcc.KV, txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) { resp = &pb.PutResponse{} resp.Header = &pb.ResponseHeader{} @@ -68,7 +64,7 @@ func Put(ctx context.Context, lg *zap.Logger, lessor lease.Lessor, kv mvcc.KV, t if p.IgnoreValue || p.IgnoreLease { if rr == nil || len(rr.KVs) == 0 { // ignore_{lease,value} flag expects previous key-value pair - return nil, nil, ErrKeyNotFound + return nil, nil, etcderrors.ErrKeyNotFound } } if p.IgnoreValue { @@ -381,7 +377,7 @@ func checkRequestPut(rv mvcc.ReadView, lessor lease.Lessor, reqOp *pb.RequestOp) return err } if rr == nil || len(rr.KVs) == 0 { - return ErrKeyNotFound + return etcderrors.ErrKeyNotFound } } if lease.LeaseID(req.Lease) != lease.NoLease { diff --git a/server/etcdserver/v2_server.go b/server/etcdserver/v2_server.go index 24c97c924a5..22709762ee2 100644 --- a/server/etcdserver/v2_server.go +++ b/server/etcdserver/v2_server.go @@ -21,6 +21,7 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" + "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" ) type RequestV2 pb.Request @@ -116,7 +117,7 @@ func (a *reqV2HandlerEtcdServer) processRaftRequest(ctx context.Context, r *Requ return Response{}, a.s.parseProposeCtxErr(ctx.Err(), start) case <-a.s.stopping: } - return Response{}, ErrStopped + return Response{}, etcderrors.ErrStopped } func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) { @@ -157,7 +158,7 @@ func (r *RequestV2) Handle(ctx context.Context, v2api RequestV2Handler) (Respons case "HEAD": return v2api.Head(ctx, r) } - return Response{}, ErrUnknownMethod + return Response{}, etcderrors.ErrUnknownMethod } func (r *RequestV2) String() string { diff --git a/server/etcdserver/v3_server.go b/server/etcdserver/v3_server.go index 583ffa387ab..5d2c4365de6 100644 --- a/server/etcdserver/v3_server.go +++ b/server/etcdserver/v3_server.go @@ -28,6 +28,7 @@ import ( "go.etcd.io/etcd/raft/v3" "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" "go.etcd.io/etcd/server/v3/etcdserver/txn" "go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease/leasehttp" @@ -258,9 +259,9 @@ func (s *EtcdServer) waitAppliedIndex() error { select { case <-s.ApplyWait(): case <-s.stopping: - return ErrStopped + return etcderrors.ErrStopped case <-time.After(applyTimeout): - return ErrTimeoutWaitAppliedIndex + return 
etcderrors.ErrTimeoutWaitAppliedIndex } return nil @@ -310,9 +311,9 @@ func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, e } if cctx.Err() == context.DeadlineExceeded { - return -1, ErrTimeout + return -1, etcderrors.ErrTimeout } - return -1, ErrCanceled + return -1, etcderrors.ErrCanceled } func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { @@ -360,9 +361,9 @@ func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveR } if cctx.Err() == context.DeadlineExceeded { - return nil, ErrTimeout + return nil, etcderrors.ErrTimeout } - return nil, ErrCanceled + return nil, etcderrors.ErrCanceled } func (s *EtcdServer) newHeader() *pb.ResponseHeader { @@ -393,13 +394,13 @@ func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) case <-time.After(dur): leader = s.cluster.Member(s.Leader()) case <-s.stopping: - return nil, ErrStopped + return nil, etcderrors.ErrStopped case <-ctx.Done(): - return nil, ErrNoLeader + return nil, etcderrors.ErrNoLeader } } if len(leader.PeerURLs) == 0 { - return nil, ErrNoLeader + return nil, etcderrors.ErrNoLeader } return leader, nil } @@ -658,7 +659,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In ai := s.getAppliedIndex() ci := s.getCommittedIndex() if ci > ai+maxGapBetweenApplyAndCommitIndex { - return nil, ErrTooManyRequests + return nil, etcderrors.ErrTooManyRequests } r.Header = &pb.RequestHeader{ @@ -683,7 +684,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In } if len(data) > int(s.Cfg.MaxRequestBytes) { - return nil, ErrRequestTooLarge + return nil, etcderrors.ErrRequestTooLarge } id := r.ID @@ -713,7 +714,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In s.w.Trigger(id, nil) // GC wait return nil, s.parseProposeCtxErr(cctx.Err(), start) case <-s.done: - return nil, ErrStopped + return nil, etcderrors.ErrStopped } } @@ -774,7 +775,7 @@ func (s *EtcdServer) linearizableReadLoop() { } func isStopped(err error) bool { - return err == raft.ErrStopped || err == ErrStopped + return err == raft.ErrStopped || err == etcderrors.ErrStopped } func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, requestId uint64) (uint64, error) { @@ -815,7 +816,7 @@ func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, case <-leaderChangedNotifier: readIndexFailed.Inc() // return a retryable error. 
- return 0, ErrLeaderChanged + return 0, etcderrors.ErrLeaderChanged case <-firstCommitInTermNotifier: firstCommitInTermNotifier = s.firstCommitInTerm.Receive() lg.Info("first commit in current term: resending ReadIndex request") @@ -843,9 +844,9 @@ func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, zap.Duration("timeout", s.Cfg.ReqTimeout()), ) slowReadIndex.Inc() - return 0, ErrTimeout + return 0, etcderrors.ErrTimeout case <-s.stopping: - return 0, ErrStopped + return 0, etcderrors.ErrStopped } } } @@ -896,7 +897,7 @@ func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error { case <-ctx.Done(): return ctx.Err() case <-s.done: - return ErrStopped + return etcderrors.ErrStopped } } @@ -921,7 +922,7 @@ func (s *EtcdServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb case pb.DowngradeRequest_CANCEL: return s.downgradeCancel(ctx) default: - return nil, ErrUnknownMethod + return nil, etcderrors.ErrUnknownMethod } } @@ -935,7 +936,7 @@ func (s *EtcdServer) downgradeValidate(ctx context.Context, v string) (*pb.Downg cv := s.ClusterVersion() if cv == nil { - return nil, ErrClusterVersionUnavailable + return nil, etcderrors.ErrClusterVersionUnavailable } resp.Version = version.Cluster(cv.String()) err = s.Version().DowngradeValidate(ctx, targetVersion) diff --git a/tests/functional/tester/stresser_key.go b/tests/functional/tester/stresser_key.go index 007a9535e7a..aabf8cdf45c 100644 --- a/tests/functional/tester/stresser_key.go +++ b/tests/functional/tester/stresser_key.go @@ -26,7 +26,7 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/raft/v3" - "go.etcd.io/etcd/server/v3/etcdserver" + "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" "go.etcd.io/etcd/tests/v3/functional/rpcpb" "go.uber.org/zap" @@ -153,13 +153,13 @@ func (s *keyStresser) isRetryableError(err error) bool { // as well. We want to keep stressing until the cluster elects a // new leader and start processing requests again. return true - case etcdserver.ErrTimeoutDueToLeaderFail.Error(), etcdserver.ErrTimeout.Error(): + case etcderrors.ErrTimeoutDueToLeaderFail.Error(), etcderrors.ErrTimeout.Error(): // This retries when request is triggered at the same time as // leader failure and follower nodes receive time out errors // from losing their leader. Followers should retry to connect // to the new leader. return true - case etcdserver.ErrStopped.Error(): + case etcderrors.ErrStopped.Error(): // one of the etcd nodes stopped from failure injection return true case rpctypes.ErrNotCapable.Error(): From 47a771871b970fe074c6b88854a2e3ab8d0024c0 Mon Sep 17 00:00:00 2001 From: Piotr Tabor Date: Fri, 22 Apr 2022 12:38:41 +0200 Subject: [PATCH 08/18] Move apply to its own package (no dependency on etcdserver). 
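A minimal sketch (not part of this patch) of the dependency direction the move enables: gRPC-layer code can now be written against the interfaces exported by the new apply package instead of importing etcdserver itself. The package and helper names below are illustrative only.

	package example

	import (
		"go.etcd.io/etcd/client/pkg/v3/types"
		"go.etcd.io/etcd/server/v3/etcdserver/apply"
	)

	// leaderOf accepts any implementation of apply.RaftStatusGetter
	// (*etcdserver.EtcdServer satisfies it), so this file no longer
	// needs an etcdserver import.
	func leaderOf(sg apply.RaftStatusGetter) types.ID {
		return sg.Leader()
	}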
--- server/etcdserver/api/v3rpc/header.go | 3 +- server/etcdserver/api/v3rpc/maintenance.go | 3 +- server/etcdserver/api/v3rpc/watch.go | 5 +- server/etcdserver/{ => apply}/apply.go | 53 +++++------- server/etcdserver/{ => apply}/apply_auth.go | 10 +-- server/etcdserver/apply/corrupt.go | 58 +++++++++++++ server/etcdserver/{ => apply}/uber_applier.go | 86 +++++++++---------- server/etcdserver/corrupt.go | 36 -------- server/etcdserver/server.go | 50 +++++++---- server/etcdserver/server_test.go | 3 +- server/etcdserver/v3_server.go | 41 ++++----- 11 files changed, 188 insertions(+), 160 deletions(-) rename server/etcdserver/{ => apply}/apply.go (94%) rename server/etcdserver/{ => apply}/apply_auth.go (96%) create mode 100644 server/etcdserver/apply/corrupt.go rename server/etcdserver/{ => apply}/uber_applier.go (69%) diff --git a/server/etcdserver/api/v3rpc/header.go b/server/etcdserver/api/v3rpc/header.go index 48886229284..0ba83ab604d 100644 --- a/server/etcdserver/api/v3rpc/header.go +++ b/server/etcdserver/api/v3rpc/header.go @@ -17,12 +17,13 @@ package v3rpc import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/server/v3/etcdserver" + apply2 "go.etcd.io/etcd/server/v3/etcdserver/apply" ) type header struct { clusterID int64 memberID int64 - sg etcdserver.RaftStatusGetter + sg apply2.RaftStatusGetter rev func() int64 } diff --git a/server/etcdserver/api/v3rpc/maintenance.go b/server/etcdserver/api/v3rpc/maintenance.go index ed5ce2b095c..54a4106046c 100644 --- a/server/etcdserver/api/v3rpc/maintenance.go +++ b/server/etcdserver/api/v3rpc/maintenance.go @@ -27,6 +27,7 @@ import ( "go.etcd.io/etcd/raft/v3" "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver" + "go.etcd.io/etcd/server/v3/etcdserver/apply" "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" serverversion "go.etcd.io/etcd/server/v3/etcdserver/version" "go.etcd.io/etcd/server/v3/storage/backend" @@ -70,7 +71,7 @@ type ClusterStatusGetter interface { type maintenanceServer struct { lg *zap.Logger - rg etcdserver.RaftStatusGetter + rg apply.RaftStatusGetter kg KVGetter bg BackendGetter a Alarmer diff --git a/server/etcdserver/api/v3rpc/watch.go b/server/etcdserver/api/v3rpc/watch.go index 4da07274e2a..543921c0459 100644 --- a/server/etcdserver/api/v3rpc/watch.go +++ b/server/etcdserver/api/v3rpc/watch.go @@ -26,6 +26,7 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver" + apply2 "go.etcd.io/etcd/server/v3/etcdserver/apply" "go.etcd.io/etcd/server/v3/storage/mvcc" "go.uber.org/zap" @@ -41,7 +42,7 @@ type watchServer struct { maxRequestBytes int - sg etcdserver.RaftStatusGetter + sg apply2.RaftStatusGetter watchable mvcc.WatchableKV ag AuthGetter } @@ -124,7 +125,7 @@ type serverWatchStream struct { maxRequestBytes int - sg etcdserver.RaftStatusGetter + sg apply2.RaftStatusGetter watchable mvcc.WatchableKV ag AuthGetter diff --git a/server/etcdserver/apply.go b/server/etcdserver/apply/apply.go similarity index 94% rename from server/etcdserver/apply.go rename to server/etcdserver/apply/apply.go index 0dbec139bed..623f9a5b550 100644 --- a/server/etcdserver/apply.go +++ b/server/etcdserver/apply/apply.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package etcdserver +package apply import ( "context" @@ -43,22 +43,31 @@ const ( v3Version = "v3" ) -type applyResult struct { - resp proto.Message - err error - // physc signals the physical effect of the request has completed in addition +// RaftStatusGetter represents etcd server and Raft progress. +type RaftStatusGetter interface { + MemberId() types.ID + Leader() types.ID + CommittedIndex() uint64 + AppliedIndex() uint64 + Term() uint64 +} + +type ApplyResult struct { + Resp proto.Message + Err error + // Physc signals the physical effect of the request has completed in addition // to being logically reflected by the node. Currently, only used for // Compaction requests. - physc <-chan struct{} - trace *traceutil.Trace + Physc <-chan struct{} + Trace *traceutil.Trace } -type ApplyFunc func(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult +type ApplyFunc func(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *ApplyResult // applierV3 is the interface for processing V3 raft messages type applierV3 interface { - WrapApply(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3, applyFunc ApplyFunc) *applyResult - //Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult + WrapApply(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3, applyFunc ApplyFunc) *ApplyResult + //Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *ApplyResult Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) @@ -142,7 +151,7 @@ func newApplierV3Backend( txnModeWriteWithSharedBuffer: txnModeWriteWithSharedBuffer} } -func (a *applierV3backend) WrapApply(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3, applyFunc ApplyFunc) *applyResult { +func (a *applierV3backend) WrapApply(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3, applyFunc ApplyFunc) *ApplyResult { return applyFunc(ctx, r, shouldApplyV3) } @@ -461,28 +470,6 @@ func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantRes return resp, err } -func noSideEffect(r *pb.InternalRaftRequest) bool { - return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil || r.AuthStatus != nil -} - -func removeNeedlessRangeReqs(txn *pb.TxnRequest) { - f := func(ops []*pb.RequestOp) []*pb.RequestOp { - j := 0 - for i := 0; i < len(ops); i++ { - if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok { - continue - } - ops[j] = ops[i] - j++ - } - - return ops[:j] - } - - txn.Success = f(txn.Success) - txn.Failure = f(txn.Failure) -} - func (a *applierV3backend) newHeader() *pb.ResponseHeader { return &pb.ResponseHeader{ ClusterId: uint64(a.cluster.ID()), diff --git a/server/etcdserver/apply_auth.go b/server/etcdserver/apply/apply_auth.go similarity index 96% rename from server/etcdserver/apply_auth.go rename to server/etcdserver/apply/apply_auth.go index f11625e4d6f..c42ec181fbc 100644 --- a/server/etcdserver/apply_auth.go +++ b/server/etcdserver/apply/apply_auth.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package etcdserver +package apply import ( "context" @@ -42,7 +42,7 @@ func newAuthApplierV3(as auth.AuthStore, base applierV3, lessor lease.Lessor) *a return &authApplierV3{applierV3: base, as: as, lessor: lessor} } -func (aa *authApplierV3) WrapApply(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3, applyFunc ApplyFunc) *applyResult { +func (aa *authApplierV3) WrapApply(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3, applyFunc ApplyFunc) *ApplyResult { aa.mu.Lock() defer aa.mu.Unlock() if r.Header != nil { @@ -55,7 +55,7 @@ func (aa *authApplierV3) WrapApply(ctx context.Context, r *pb.InternalRaftReques if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil { aa.authInfo.Username = "" aa.authInfo.Revision = 0 - return &applyResult{err: err} + return &ApplyResult{Err: err} } } ret := aa.applierV3.WrapApply(ctx, r, shouldApplyV3, applyFunc) @@ -150,7 +150,7 @@ func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.Req return nil } -func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error { +func CheckTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error { for _, c := range rt.Compare { if err := as.IsRangePermitted(ai, c.Key, c.RangeEnd); err != nil { return err @@ -163,7 +163,7 @@ func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error } func (aa *authApplierV3) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { - if err := checkTxnAuth(aa.as, &aa.authInfo, rt); err != nil { + if err := CheckTxnAuth(aa.as, &aa.authInfo, rt); err != nil { return nil, nil, err } return aa.applierV3.Txn(ctx, rt) diff --git a/server/etcdserver/apply/corrupt.go b/server/etcdserver/apply/corrupt.go new file mode 100644 index 00000000000..32620cde27b --- /dev/null +++ b/server/etcdserver/apply/corrupt.go @@ -0,0 +1,58 @@ +// Copyright 2022 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package apply + +import ( + "context" + + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/pkg/v3/traceutil" + "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" + "go.etcd.io/etcd/server/v3/storage/mvcc" +) + +type applierV3Corrupt struct { + applierV3 +} + +func newApplierV3Corrupt(a applierV3) *applierV3Corrupt { return &applierV3Corrupt{a} } + +func (a *applierV3Corrupt) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { + return nil, nil, etcderrors.ErrCorrupt +} + +func (a *applierV3Corrupt) Range(ctx context.Context, txn mvcc.TxnRead, p *pb.RangeRequest) (*pb.RangeResponse, error) { + return nil, etcderrors.ErrCorrupt +} + +func (a *applierV3Corrupt) DeleteRange(txn mvcc.TxnWrite, p *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { + return nil, etcderrors.ErrCorrupt +} + +func (a *applierV3Corrupt) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { + return nil, nil, etcderrors.ErrCorrupt +} + +func (a *applierV3Corrupt) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) { + return nil, nil, nil, etcderrors.ErrCorrupt +} + +func (a *applierV3Corrupt) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { + return nil, etcderrors.ErrCorrupt +} + +func (a *applierV3Corrupt) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { + return nil, etcderrors.ErrCorrupt +} diff --git a/server/etcdserver/uber_applier.go b/server/etcdserver/apply/uber_applier.go similarity index 69% rename from server/etcdserver/uber_applier.go rename to server/etcdserver/apply/uber_applier.go index c3d6d968621..f45643ad4ce 100644 --- a/server/etcdserver/uber_applier.go +++ b/server/etcdserver/apply/uber_applier.go @@ -12,11 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
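The applierV3Corrupt just added relies on Go struct embedding: only the methods it redefines return ErrCorrupt, while every other applierV3 method falls through to the wrapped applier, which is why, for example, auth reads keep working while a CORRUPT alarm is raised. A toy reduction of the same technique:

package main

import (
	"errors"
	"fmt"
)

var errCorrupt = errors.New("corrupt alarm active")

type kv interface {
	Put(k, v string) error
	UserList() ([]string, error)
}

type backendKV struct{}

func (backendKV) Put(k, v string) error       { fmt.Println("put", k); return nil }
func (backendKV) UserList() ([]string, error) { return []string{"root"}, nil }

// corruptKV embeds the interface: only the overridden method is blocked;
// everything else (UserList here) falls through to the wrapped value.
type corruptKV struct{ kv }

func (corruptKV) Put(k, v string) error { return errCorrupt }

func main() {
	var store kv = corruptKV{backendKV{}}
	fmt.Println(store.Put("a", "b")) // corrupt alarm active
	fmt.Println(store.UserList())    // [root] <nil> -- falls through
}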
-package etcdserver +package apply import ( "context" - "strconv" "time" pb "go.etcd.io/etcd/api/v3/etcdserverpb" @@ -30,7 +29,7 @@ import ( "go.uber.org/zap" ) -type uberApplier struct { +type UberApplier struct { lg *zap.Logger alarmStore *v3alarm.AlarmStore @@ -43,7 +42,7 @@ type uberApplier struct { applyV3base applierV3 } -func newUberApplier( +func NewUberApplier( lg *zap.Logger, be backend.Backend, kv mvcc.KV, @@ -56,10 +55,10 @@ func newUberApplier( consistentIndex cindex.ConsistentIndexer, warningApplyDuration time.Duration, txnModeWriteWithSharedBuffer bool, - quotaBackendBytesCfg int64) *uberApplier { + quotaBackendBytesCfg int64) *UberApplier { applyV3base_ := newApplierV3(lg, be, kv, alarmStore, authStore, lessor, cluster, raftStatus, snapshotServer, consistentIndex, txnModeWriteWithSharedBuffer, quotaBackendBytesCfg) - ua := &uberApplier{ + ua := &UberApplier{ lg: lg, alarmStore: alarmStore, warningApplyDuration: warningApplyDuration, @@ -91,7 +90,7 @@ func newApplierV3( ) } -func (a *uberApplier) RestoreAlarms() { +func (a *UberApplier) RestoreAlarms() { noSpaceAlarms := len(a.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 corruptAlarms := len(a.alarmStore.Get(pb.AlarmType_CORRUPT)) > 0 a.applyV3 = a.applyV3base @@ -103,21 +102,22 @@ func (a *uberApplier) RestoreAlarms() { } } -func (a *uberApplier) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult { +func (a *UberApplier) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *ApplyResult { return a.applyV3.WrapApply(context.TODO(), r, shouldApplyV3, a.dispatch) } // This function -func (a *uberApplier) dispatch(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *applyResult { +func (a *UberApplier) dispatch(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *ApplyResult { op := "unknown" - ar := &applyResult{} + ar := &ApplyResult{} defer func(start time.Time) { - success := ar.err == nil || ar.err == mvcc.ErrCompacted - applySec.WithLabelValues(v3Version, op, strconv.FormatBool(success)).Observe(time.Since(start).Seconds()) - warnOfExpensiveRequest(a.lg, a.warningApplyDuration, start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err) - if !success { - warnOfFailedRequest(a.lg, start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err) - } + op += " " + // success := ar.Err == nil || ar.Err == mvcc.ErrCompacted + //etcdserver.applySec.WithLabelValues(v3Version, op, strconv.FormatBool(success)).Observe(time.Since(start).Seconds()) + //etcdserver.warnOfExpensiveRequest(a.lg, a.warningApplyDuration, start, &pb.InternalRaftStringer{Request: r}, ar.Resp, ar.Err) + //if !success { + // etcdserver.warnOfFailedRequest(a.lg, start, &pb.InternalRaftStringer{Request: r}, ar.Resp, ar.Err) + //} }(time.Now()) switch { @@ -143,88 +143,88 @@ func (a *uberApplier) dispatch(ctx context.Context, r *pb.InternalRaftRequest, s switch { case r.Range != nil: op = "Range" - ar.resp, ar.err = a.applyV3.Range(ctx, nil, r.Range) + ar.Resp, ar.Err = a.applyV3.Range(ctx, nil, r.Range) case r.Put != nil: op = "Put" - ar.resp, ar.trace, ar.err = a.applyV3.Put(ctx, nil, r.Put) + ar.Resp, ar.Trace, ar.Err = a.applyV3.Put(ctx, nil, r.Put) case r.DeleteRange != nil: op = "DeleteRange" - ar.resp, ar.err = a.applyV3.DeleteRange(nil, r.DeleteRange) + ar.Resp, ar.Err = a.applyV3.DeleteRange(nil, r.DeleteRange) case r.Txn != nil: op = "Txn" - ar.resp, ar.trace, ar.err = a.applyV3.Txn(ctx, r.Txn) + ar.Resp, ar.Trace, ar.Err = 
a.applyV3.Txn(ctx, r.Txn) case r.Compaction != nil: op = "Compaction" - ar.resp, ar.physc, ar.trace, ar.err = a.applyV3.Compaction(r.Compaction) + ar.Resp, ar.Physc, ar.Trace, ar.Err = a.applyV3.Compaction(r.Compaction) case r.LeaseGrant != nil: op = "LeaseGrant" - ar.resp, ar.err = a.applyV3.LeaseGrant(r.LeaseGrant) + ar.Resp, ar.Err = a.applyV3.LeaseGrant(r.LeaseGrant) case r.LeaseRevoke != nil: op = "LeaseRevoke" - ar.resp, ar.err = a.applyV3.LeaseRevoke(r.LeaseRevoke) + ar.Resp, ar.Err = a.applyV3.LeaseRevoke(r.LeaseRevoke) case r.LeaseCheckpoint != nil: op = "LeaseCheckpoint" - ar.resp, ar.err = a.applyV3.LeaseCheckpoint(r.LeaseCheckpoint) + ar.Resp, ar.Err = a.applyV3.LeaseCheckpoint(r.LeaseCheckpoint) case r.Alarm != nil: op = "Alarm" - ar.resp, ar.err = a.Alarm(r.Alarm) + ar.Resp, ar.Err = a.Alarm(r.Alarm) case r.Authenticate != nil: op = "Authenticate" - ar.resp, ar.err = a.applyV3.Authenticate(r.Authenticate) + ar.Resp, ar.Err = a.applyV3.Authenticate(r.Authenticate) case r.AuthEnable != nil: op = "AuthEnable" - ar.resp, ar.err = a.applyV3.AuthEnable() + ar.Resp, ar.Err = a.applyV3.AuthEnable() case r.AuthDisable != nil: op = "AuthDisable" - ar.resp, ar.err = a.applyV3.AuthDisable() + ar.Resp, ar.Err = a.applyV3.AuthDisable() case r.AuthStatus != nil: - ar.resp, ar.err = a.applyV3.AuthStatus() + ar.Resp, ar.Err = a.applyV3.AuthStatus() case r.AuthUserAdd != nil: op = "AuthUserAdd" - ar.resp, ar.err = a.applyV3.UserAdd(r.AuthUserAdd) + ar.Resp, ar.Err = a.applyV3.UserAdd(r.AuthUserAdd) case r.AuthUserDelete != nil: op = "AuthUserDelete" - ar.resp, ar.err = a.applyV3.UserDelete(r.AuthUserDelete) + ar.Resp, ar.Err = a.applyV3.UserDelete(r.AuthUserDelete) case r.AuthUserChangePassword != nil: op = "AuthUserChangePassword" - ar.resp, ar.err = a.applyV3.UserChangePassword(r.AuthUserChangePassword) + ar.Resp, ar.Err = a.applyV3.UserChangePassword(r.AuthUserChangePassword) case r.AuthUserGrantRole != nil: op = "AuthUserGrantRole" - ar.resp, ar.err = a.applyV3.UserGrantRole(r.AuthUserGrantRole) + ar.Resp, ar.Err = a.applyV3.UserGrantRole(r.AuthUserGrantRole) case r.AuthUserGet != nil: op = "AuthUserGet" - ar.resp, ar.err = a.applyV3.UserGet(r.AuthUserGet) + ar.Resp, ar.Err = a.applyV3.UserGet(r.AuthUserGet) case r.AuthUserRevokeRole != nil: op = "AuthUserRevokeRole" - ar.resp, ar.err = a.applyV3.UserRevokeRole(r.AuthUserRevokeRole) + ar.Resp, ar.Err = a.applyV3.UserRevokeRole(r.AuthUserRevokeRole) case r.AuthRoleAdd != nil: op = "AuthRoleAdd" - ar.resp, ar.err = a.applyV3.RoleAdd(r.AuthRoleAdd) + ar.Resp, ar.Err = a.applyV3.RoleAdd(r.AuthRoleAdd) case r.AuthRoleGrantPermission != nil: op = "AuthRoleGrantPermission" - ar.resp, ar.err = a.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission) + ar.Resp, ar.Err = a.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission) case r.AuthRoleGet != nil: op = "AuthRoleGet" - ar.resp, ar.err = a.applyV3.RoleGet(r.AuthRoleGet) + ar.Resp, ar.Err = a.applyV3.RoleGet(r.AuthRoleGet) case r.AuthRoleRevokePermission != nil: op = "AuthRoleRevokePermission" - ar.resp, ar.err = a.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission) + ar.Resp, ar.Err = a.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission) case r.AuthRoleDelete != nil: op = "AuthRoleDelete" - ar.resp, ar.err = a.applyV3.RoleDelete(r.AuthRoleDelete) + ar.Resp, ar.Err = a.applyV3.RoleDelete(r.AuthRoleDelete) case r.AuthUserList != nil: op = "AuthUserList" - ar.resp, ar.err = a.applyV3.UserList(r.AuthUserList) + ar.Resp, ar.Err = a.applyV3.UserList(r.AuthUserList) case r.AuthRoleList != 
nil: op = "AuthRoleList" - ar.resp, ar.err = a.applyV3.RoleList(r.AuthRoleList) + ar.Resp, ar.Err = a.applyV3.RoleList(r.AuthRoleList) default: a.lg.Panic("not implemented apply", zap.Stringer("raft-request", r)) } return ar } -func (a *uberApplier) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { +func (a *UberApplier) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { resp, err := a.applyV3.Alarm(ar) if ar.Action == pb.AlarmRequest_ACTIVATE || diff --git a/server/etcdserver/corrupt.go b/server/etcdserver/corrupt.go index 9c9bd24f826..133bcafb690 100644 --- a/server/etcdserver/corrupt.go +++ b/server/etcdserver/corrupt.go @@ -27,8 +27,6 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/pkg/v3/types" - "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" "go.etcd.io/etcd/server/v3/storage/mvcc" "go.uber.org/zap" @@ -304,40 +302,6 @@ func (s *EtcdServer) getPeerHashKVs(rev int64) []*peerHashKVResp { return resps } -type applierV3Corrupt struct { - applierV3 -} - -func newApplierV3Corrupt(a applierV3) *applierV3Corrupt { return &applierV3Corrupt{a} } - -func (a *applierV3Corrupt) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { - return nil, nil, etcderrors.ErrCorrupt -} - -func (a *applierV3Corrupt) Range(ctx context.Context, txn mvcc.TxnRead, p *pb.RangeRequest) (*pb.RangeResponse, error) { - return nil, etcderrors.ErrCorrupt -} - -func (a *applierV3Corrupt) DeleteRange(txn mvcc.TxnWrite, p *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - return nil, etcderrors.ErrCorrupt -} - -func (a *applierV3Corrupt) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { - return nil, nil, etcderrors.ErrCorrupt -} - -func (a *applierV3Corrupt) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) { - return nil, nil, nil, etcderrors.ErrCorrupt -} - -func (a *applierV3Corrupt) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - return nil, etcderrors.ErrCorrupt -} - -func (a *applierV3Corrupt) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - return nil, etcderrors.ErrCorrupt -} - const PeerHashKVPath = "/members/hashkv" type hashKVHandler struct { diff --git a/server/etcdserver/server.go b/server/etcdserver/server.go index 986298271f8..5f0ddb69adb 100644 --- a/server/etcdserver/server.go +++ b/server/etcdserver/server.go @@ -33,7 +33,9 @@ import ( humanize "github.com/dustin/go-humanize" "github.com/prometheus/client_golang/prometheus" "go.etcd.io/etcd/pkg/v3/notify" + "go.etcd.io/etcd/pkg/v3/runtime" "go.etcd.io/etcd/server/v3/config" + apply2 "go.etcd.io/etcd/server/v3/etcdserver/apply" "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" "go.uber.org/zap" @@ -45,7 +47,6 @@ import ( "go.etcd.io/etcd/client/pkg/v3/verify" "go.etcd.io/etcd/pkg/v3/idutil" "go.etcd.io/etcd/pkg/v3/pbutil" - "go.etcd.io/etcd/pkg/v3/runtime" "go.etcd.io/etcd/pkg/v3/schedule" "go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/pkg/v3/wait" @@ -149,7 +150,7 @@ type ServerV2 interface { type ServerV3 interface { Server - RaftStatusGetter + apply2.RaftStatusGetter } func (s *EtcdServer) ClientCertAuthEnabled() bool { return s.Cfg.ClientCertAuthEnabled } @@ -251,7 +252,7 @@ type EtcdServer struct { applyV2 ApplierV2 - uberApply *uberApplier + uberApply *apply2.UberApplier applyWait wait.WaitTime @@ -1076,8 +1077,8 
@@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { s.uberApply = s.NewUberApplier() } -func (s *EtcdServer) NewUberApplier() *uberApplier { - return newUberApplier(s.lg, s.be, s.KV(), s.alarmStore, s.authStore, s.lessor, s.cluster, s, s, s.consistIndex, +func (s *EtcdServer) NewUberApplier() *apply2.UberApplier { + return apply2.NewUberApplier(s.lg, s.be, s.KV(), s.alarmStore, s.authStore, s.lessor, s.cluster, s, s, s.consistIndex, s.Cfg.WarningApplyDuration, s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer, s.Cfg.QuotaBackendBytes) } @@ -1605,15 +1606,6 @@ func (s *EtcdServer) FirstCommitInTermNotify() <-chan struct{} { return s.firstCommitInTerm.Receive() } -// RaftStatusGetter represents etcd server and Raft progress. -type RaftStatusGetter interface { - MemberId() types.ID - Leader() types.ID - CommittedIndex() uint64 - AppliedIndex() uint64 - Term() uint64 -} - func (s *EtcdServer) MemberId() types.ID { return s.memberId } func (s *EtcdServer) Leader() types.ID { return types.ID(s.getLead()) } @@ -1837,7 +1829,7 @@ func (s *EtcdServer) apply( func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { shouldApplyV3 := membership.ApplyV2storeOnly applyV3Performed := false - var ar *applyResult + var ar *apply2.ApplyResult index := s.consistIndex.ConsistentIndex() if e.Index > index { // set the consistent index of current executing entry @@ -1846,7 +1838,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { defer func() { // The txPostLockInsideApplyHook will not get called in some cases, // in which we should move the consistent index forward directly. - if !applyV3Performed || (ar != nil && ar.err != nil) { + if !applyV3Performed || (ar != nil && ar.Err != nil) { s.consistIndex.SetConsistentIndex(e.Index, e.Term) } }() @@ -1912,7 +1904,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { return } - if ar.err != etcderrors.ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 { + if ar.Err != etcderrors.ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 { s.w.Trigger(id, ar) return } @@ -1922,7 +1914,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { "message exceeded backend quota; raising alarm", zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes), zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))), - zap.Error(ar.err), + zap.Error(ar.Err), ) s.GoAttach(func() { @@ -1936,6 +1928,28 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { }) } +func noSideEffect(r *pb.InternalRaftRequest) bool { + return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil || r.AuthStatus != nil +} + +func removeNeedlessRangeReqs(txn *pb.TxnRequest) { + f := func(ops []*pb.RequestOp) []*pb.RequestOp { + j := 0 + for i := 0; i < len(ops); i++ { + if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok { + continue + } + ops[j] = ops[i] + j++ + } + + return ops[:j] + } + + txn.Success = f(txn.Success) + txn.Failure = f(txn.Failure) +} + // applyConfChange applies a ConfChange to the server. 
It is only // invoked with a ConfChange that has already passed through Raft func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState, shouldApplyV3 membership.ShouldApplyV3) (bool, error) { diff --git a/server/etcdserver/server_test.go b/server/etcdserver/server_test.go index effbef437e6..78cd1b46bec 100644 --- a/server/etcdserver/server_test.go +++ b/server/etcdserver/server_test.go @@ -46,6 +46,7 @@ import ( "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" "go.etcd.io/etcd/server/v3/etcdserver/api/snap" "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" + apply2 "go.etcd.io/etcd/server/v3/etcdserver/apply" "go.etcd.io/etcd/server/v3/etcdserver/cindex" "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" "go.etcd.io/etcd/server/v3/lease" @@ -1478,7 +1479,7 @@ func TestPublishV3(t *testing.T) { n := newNodeRecorder() ch := make(chan interface{}, 1) // simulate that request has gone through consensus - ch <- &applyResult{} + ch <- &apply2.ApplyResult{} w := wait.NewWithResponse(ch) ctx, cancel := context.WithCancel(context.Background()) lg := zaptest.NewLogger(t) diff --git a/server/etcdserver/v3_server.go b/server/etcdserver/v3_server.go index 5d2c4365de6..1219f40d0ad 100644 --- a/server/etcdserver/v3_server.go +++ b/server/etcdserver/v3_server.go @@ -28,6 +28,7 @@ import ( "go.etcd.io/etcd/raft/v3" "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + apply2 "go.etcd.io/etcd/server/v3/etcdserver/apply" "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" "go.etcd.io/etcd/server/v3/etcdserver/txn" "go.etcd.io/etcd/server/v3/lease" @@ -172,7 +173,7 @@ func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse var resp *pb.TxnResponse var err error chk := func(ai *auth.AuthInfo) error { - return checkTxnAuth(s.authStore, ai, r) + return apply2.CheckTxnAuth(s.authStore, ai, r) } defer func(start time.Time) { @@ -201,17 +202,17 @@ func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb. startTime := time.Now() result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r}) trace := traceutil.TODO() - if result != nil && result.trace != nil { - trace = result.trace + if result != nil && result.Trace != nil { + trace = result.Trace defer func() { trace.LogIfLong(traceThreshold) }() - applyStart := result.trace.GetStartTime() - result.trace.SetStartTime(startTime) + applyStart := result.Trace.GetStartTime() + result.Trace.SetStartTime(startTime) trace.InsertStep(0, applyStart, "process raft request") } - if r.Physical && result != nil && result.physc != nil { - <-result.physc + if r.Physical && result != nil && result.Physc != nil { + <-result.Physc // The compaction is done deleting keys; the hash is now settled // but the data is not necessarily committed. If there's a crash, // the hash may revert to a hash prior to compaction completing @@ -227,10 +228,10 @@ func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb. 
if err != nil { return nil, err } - if result.err != nil { - return nil, result.err + if result.Err != nil { + return nil, result.Err } - resp := result.resp.(*pb.CompactionResponse) + resp := result.Resp.(*pb.CompactionResponse) if resp == nil { resp = &pb.CompactionResponse{} } @@ -611,19 +612,19 @@ func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftReque if err != nil { return nil, err } - if result.err != nil { - return nil, result.err + if result.Err != nil { + return nil, result.Err } - if startTime, ok := ctx.Value(traceutil.StartTimeKey).(time.Time); ok && result.trace != nil { - applyStart := result.trace.GetStartTime() + if startTime, ok := ctx.Value(traceutil.StartTimeKey).(time.Time); ok && result.Trace != nil { + applyStart := result.Trace.GetStartTime() // The trace object is created in apply. Here reset the start time to trace // the raft request time by the difference between the request start time // and apply start time - result.trace.SetStartTime(startTime) - result.trace.InsertStep(0, applyStart, "process raft request") - result.trace.LogIfLong(traceThreshold) + result.Trace.SetStartTime(startTime) + result.Trace.InsertStep(0, applyStart, "process raft request") + result.Trace.LogIfLong(traceThreshold) } - return result.resp, nil + return result.Resp, nil } func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { @@ -655,7 +656,7 @@ func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) e return nil } -func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) { +func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*apply2.ApplyResult, error) { ai := s.getAppliedIndex() ci := s.getCommittedIndex() if ci > ai+maxGapBetweenApplyAndCommitIndex { @@ -708,7 +709,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In select { case x := <-ch: - return x.(*applyResult), nil + return x.(*apply2.ApplyResult), nil case <-cctx.Done(): proposalsFailed.Inc() s.w.Trigger(id, nil) // GC wait From 4e04770bac1124b782752e48136cd1f3bc0d0be2 Mon Sep 17 00:00:00 2001 From: Piotr Tabor Date: Mon, 4 Apr 2022 20:41:11 +0200 Subject: [PATCH 09/18] Apply encapsulation: Cleanup metrics reporting. Side effect: applySec(0.4s) used to be reported as 0s, now it's correctly 0.4s. 
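The resolution fix mentioned in this commit message comes down to how a time.Duration is converted before being observed: going through whole seconds drops sub-second latencies to zero, while the microsecond-based conversion used by the new helper below preserves them. A two-line illustration, with 400ms as an example value:

package main

import (
	"fmt"
	"time"
)

func main() {
	latency := 400 * time.Millisecond

	// Converting through whole seconds truncates a 400ms latency to 0 --
	// the "0.4s reported as 0s" symptom described in the commit message.
	fmt.Println(int64(latency.Seconds())) // 0

	// The microsecond-based conversion keeps sub-second resolution.
	fmt.Println(float64(latency.Microseconds()) / 1000000.0) // 0.4
}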
--- server/etcdserver/apply/uber_applier.go | 14 +-- server/etcdserver/apply_v2.go | 6 +- server/etcdserver/metrics.go | 19 ---- server/etcdserver/raft_test.go | 6 +- server/etcdserver/txn/metrics.go | 51 +++++++++ server/etcdserver/txn/util.go | 103 ++++++++++++++++++ .../etcdserver/{ => txn}/util_bench_test.go | 5 +- server/etcdserver/util.go | 82 -------------- server/etcdserver/v3_server.go | 4 +- 9 files changed, 173 insertions(+), 117 deletions(-) create mode 100644 server/etcdserver/txn/metrics.go create mode 100644 server/etcdserver/txn/util.go rename server/etcdserver/{ => txn}/util_bench_test.go (91%) diff --git a/server/etcdserver/apply/uber_applier.go b/server/etcdserver/apply/uber_applier.go index f45643ad4ce..e30ef7e865d 100644 --- a/server/etcdserver/apply/uber_applier.go +++ b/server/etcdserver/apply/uber_applier.go @@ -23,6 +23,7 @@ import ( "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm" "go.etcd.io/etcd/server/v3/etcdserver/cindex" + "go.etcd.io/etcd/server/v3/etcdserver/txn" "go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/storage/backend" "go.etcd.io/etcd/server/v3/storage/mvcc" @@ -111,13 +112,12 @@ func (a *UberApplier) dispatch(ctx context.Context, r *pb.InternalRaftRequest, s op := "unknown" ar := &ApplyResult{} defer func(start time.Time) { - op += " " - // success := ar.Err == nil || ar.Err == mvcc.ErrCompacted - //etcdserver.applySec.WithLabelValues(v3Version, op, strconv.FormatBool(success)).Observe(time.Since(start).Seconds()) - //etcdserver.warnOfExpensiveRequest(a.lg, a.warningApplyDuration, start, &pb.InternalRaftStringer{Request: r}, ar.Resp, ar.Err) - //if !success { - // etcdserver.warnOfFailedRequest(a.lg, start, &pb.InternalRaftStringer{Request: r}, ar.Resp, ar.Err) - //} + success := ar.Err == nil || ar.Err == mvcc.ErrCompacted + txn.ApplySecObserve(v3Version, op, success, time.Since(start)) + txn.WarnOfExpensiveRequest(a.lg, a.warningApplyDuration, start, &pb.InternalRaftStringer{Request: r}, ar.Resp, ar.Err) + if !success { + txn.WarnOfFailedRequest(a.lg, start, &pb.InternalRaftStringer{Request: r}, ar.Resp, ar.Err) + } }(time.Now()) switch { diff --git a/server/etcdserver/apply_v2.go b/server/etcdserver/apply_v2.go index 538d1dc43a2..75635092185 100644 --- a/server/etcdserver/apply_v2.go +++ b/server/etcdserver/apply_v2.go @@ -18,7 +18,6 @@ import ( "encoding/json" "fmt" "path" - "strconv" "time" "unicode/utf8" @@ -28,6 +27,7 @@ import ( "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" + "go.etcd.io/etcd/server/v3/etcdserver/txn" "go.uber.org/zap" ) @@ -130,8 +130,8 @@ func (s *EtcdServer) applyV2Request(r *RequestV2, shouldApplyV3 membership.Shoul return } success := resp.Err == nil - applySec.WithLabelValues(v2Version, r.Method, strconv.FormatBool(success)).Observe(time.Since(start).Seconds()) - warnOfExpensiveRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, stringer, nil, nil) + txn.ApplySecObserve(v2Version, r.Method, success, time.Since(start)) + txn.WarnOfExpensiveRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, stringer, nil, nil) }(time.Now()) switch r.Method { diff --git a/server/etcdserver/metrics.go b/server/etcdserver/metrics.go index 33ee02747fc..954dfafca4e 100644 --- a/server/etcdserver/metrics.go +++ b/server/etcdserver/metrics.go @@ -70,12 +70,6 @@ var ( Name: "heartbeat_send_failures_total", Help: "The total number of leader heartbeat send 
failures (likely overloaded from slow disk).", }) - slowApplies = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "slow_apply_total", - Help: "The total number of slow apply requests (likely overloaded from slow disk).", - }) applySnapshotInProgress = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "etcd", Subsystem: "server", @@ -159,17 +153,6 @@ var ( Name: "limit", Help: "The file descriptor limit.", }) - applySec = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "server", - Name: "apply_duration_seconds", - Help: "The latency distributions of v2 apply called by backend.", - - // lowest bucket start of upper bound 0.0001 sec (0.1 ms) with factor 2 - // highest bucket start of 0.0001 sec * 2^19 == 52.4288 sec - Buckets: prometheus.ExponentialBuckets(0.0001, 2, 20), - }, - []string{"version", "op", "success"}) ) func init() { @@ -177,7 +160,6 @@ func init() { prometheus.MustRegister(isLeader) prometheus.MustRegister(leaderChanges) prometheus.MustRegister(heartbeatSendFailures) - prometheus.MustRegister(slowApplies) prometheus.MustRegister(applySnapshotInProgress) prometheus.MustRegister(proposalsCommitted) prometheus.MustRegister(proposalsApplied) @@ -194,7 +176,6 @@ func init() { prometheus.MustRegister(learnerPromoteFailed) prometheus.MustRegister(fdUsed) prometheus.MustRegister(fdLimit) - prometheus.MustRegister(applySec) currentVersion.With(prometheus.Labels{ "server_version": version.Version, diff --git a/server/etcdserver/raft_test.go b/server/etcdserver/raft_test.go index 9921929892d..f34548553df 100644 --- a/server/etcdserver/raft_test.go +++ b/server/etcdserver/raft_test.go @@ -33,6 +33,7 @@ import ( ) func TestGetIDs(t *testing.T) { + lg := zaptest.NewLogger(t) addcc := &raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 2} addEntry := raftpb.Entry{Type: raftpb.EntryConfChange, Data: pbutil.MustMarshal(addcc)} removecc := &raftpb.ConfChange{Type: raftpb.ConfChangeRemoveNode, NodeID: 2} @@ -67,7 +68,7 @@ func TestGetIDs(t *testing.T) { if tt.confState != nil { snap.Metadata.ConfState = *tt.confState } - idSet := serverstorage.GetEffectiveNodeIDsFromWalEntries(zaptest.NewLogger(t), &snap, tt.ents) + idSet := serverstorage.GetEffectiveNodeIDsFromWalEntries(lg, &snap, tt.ents) if !reflect.DeepEqual(idSet, tt.widSet) { t.Errorf("#%d: idset = %#v, want %#v", i, idSet, tt.widSet) } @@ -75,6 +76,7 @@ func TestGetIDs(t *testing.T) { } func TestCreateConfigChangeEnts(t *testing.T) { + lg := zaptest.NewLogger(t) m := membership.Member{ ID: types.ID(1), RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}}, @@ -147,7 +149,7 @@ func TestCreateConfigChangeEnts(t *testing.T) { } for i, tt := range tests { - gents := serverstorage.CreateConfigChangeEnts(zaptest.NewLogger(t), tt.ids, tt.self, tt.term, tt.index) + gents := serverstorage.CreateConfigChangeEnts(lg, tt.ids, tt.self, tt.term, tt.index) if !reflect.DeepEqual(gents, tt.wents) { t.Errorf("#%d: ents = %v, want %v", i, gents, tt.wents) } diff --git a/server/etcdserver/txn/metrics.go b/server/etcdserver/txn/metrics.go new file mode 100644 index 00000000000..1e7a6f19712 --- /dev/null +++ b/server/etcdserver/txn/metrics.go @@ -0,0 +1,51 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package txn + +import ( + "strconv" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + slowApplies = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "slow_apply_total", + Help: "The total number of slow apply requests (likely overloaded from slow disk).", + }) + applySec = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Namespace: "etcd", + Subsystem: "server", + Name: "apply_duration_seconds", + Help: "The latency distributions of v2 apply called by backend.", + + // lowest bucket start of upper bound 0.0001 sec (0.1 ms) with factor 2 + // highest bucket start of 0.0001 sec * 2^19 == 52.4288 sec + Buckets: prometheus.ExponentialBuckets(0.0001, 2, 20), + }, + []string{"version", "op", "success"}) +) + +func ApplySecObserve(version, op string, success bool, latency time.Duration) { + applySec.WithLabelValues(version, op, strconv.FormatBool(success)).Observe(float64(latency.Microseconds()) / 1000000.0) +} + +func init() { + prometheus.MustRegister(applySec) + prometheus.MustRegister(slowApplies) +} diff --git a/server/etcdserver/txn/util.go b/server/etcdserver/txn/util.go new file mode 100644 index 00000000000..64e2e01bcb5 --- /dev/null +++ b/server/etcdserver/txn/util.go @@ -0,0 +1,103 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
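The applySec histogram declared above keeps its exponential bucket layout: 20 buckets starting at an upper bound of 0.1ms and doubling up to ~52.4s. A self-contained sketch (the metric name is invented for the demo) of an observation landing in that layout and being read back:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	// Same bucket layout as applySec: 0.0001s doubling 20 times.
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "demo_apply_duration_seconds",
		Help:    "Demo histogram with applySec's bucket layout.",
		Buckets: prometheus.ExponentialBuckets(0.0001, 2, 20),
	})
	h.Observe(0.4) // a 400ms apply

	m := &dto.Metric{}
	if err := h.Write(m); err != nil {
		panic(err)
	}
	fmt.Println("sum:", m.Histogram.GetSampleSum())     // 0.4
	fmt.Println("count:", m.Histogram.GetSampleCount()) // 1
}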
+ +package txn + +import ( + "fmt" + "reflect" + "strings" + "time" + + "github.com/golang/protobuf/proto" + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + + "go.uber.org/zap" +) + +func WarnOfExpensiveRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) { + if time.Since(now) <= warningApplyDuration { + return + } + var resp string + if !isNil(respMsg) { + resp = fmt.Sprintf("size:%d", proto.Size(respMsg)) + } + warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "", resp, err) +} + +func WarnOfFailedRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) { + var resp string + if !isNil(respMsg) { + resp = fmt.Sprintf("size:%d", proto.Size(respMsg)) + } + d := time.Since(now) + lg.Warn( + "failed to apply request", + zap.Duration("took", d), + zap.String("request", reqStringer.String()), + zap.String("response", resp), + zap.Error(err), + ) +} + +func WarnOfExpensiveReadOnlyTxnRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, r *pb.TxnRequest, txnResponse *pb.TxnResponse, err error) { + if time.Since(now) <= warningApplyDuration { + return + } + reqStringer := pb.NewLoggableTxnRequest(r) + var resp string + if !isNil(txnResponse) { + var resps []string + for _, r := range txnResponse.Responses { + switch op := r.Response.(type) { + case *pb.ResponseOp_ResponseRange: + resps = append(resps, fmt.Sprintf("range_response_count:%d", len(op.ResponseRange.Kvs))) + default: + // only range responses should be in a read only txn request + } + } + resp = fmt.Sprintf("responses:<%s> size:%d", strings.Join(resps, " "), txnResponse.Size()) + } + warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "read-only txn ", resp, err) +} + +func WarnOfExpensiveReadOnlyRangeRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, rangeResponse *pb.RangeResponse, err error) { + if time.Since(now) <= warningApplyDuration { + return + } + var resp string + if !isNil(rangeResponse) { + resp = fmt.Sprintf("range_response_count:%d size:%d", len(rangeResponse.Kvs), rangeResponse.Size()) + } + warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "read-only range ", resp, err) +} + +// callers need make sure time has passed warningApplyDuration +func warnOfExpensiveGenericRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, prefix string, resp string, err error) { + lg.Warn( + "apply request took too long", + zap.Duration("took", time.Since(now)), + zap.Duration("expected-duration", warningApplyDuration), + zap.String("prefix", prefix), + zap.String("request", reqStringer.String()), + zap.String("response", resp), + zap.Error(err), + ) + slowApplies.Inc() +} + +func isNil(msg proto.Message) bool { + return msg == nil || reflect.ValueOf(msg).IsNil() +} diff --git a/server/etcdserver/util_bench_test.go b/server/etcdserver/txn/util_bench_test.go similarity index 91% rename from server/etcdserver/util_bench_test.go rename to server/etcdserver/txn/util_bench_test.go index 57d7d46131e..47a5575371e 100644 --- a/server/etcdserver/util_bench_test.go +++ b/server/etcdserver/txn/util_bench_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
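Because the warn helpers above are now exported from the txn package, other packages can reuse them. The contract visible in the code: nothing is logged under the threshold, while an over-threshold request logs "apply request took too long" and increments slowApplies. A minimal usage sketch; the fake request type and values are illustrative:

package main

import (
	"errors"
	"time"

	"go.etcd.io/etcd/server/v3/etcdserver/txn"
	"go.uber.org/zap"
)

type fakeReq struct{}

func (fakeReq) String() string { return "put key=a" }

func main() {
	lg := zap.NewExample()

	// Pretend the request started 2s ago; with a 1s threshold the helper
	// logs the warning and bumps the slowApplies counter.
	start := time.Now().Add(-2 * time.Second)
	txn.WarnOfExpensiveRequest(lg, time.Second, start, fakeReq{}, nil, errors.New("demo"))

	// Under the threshold it returns early without logging anything.
	txn.WarnOfExpensiveRequest(lg, time.Second, time.Now(), fakeReq{}, nil, nil)
}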
-package etcdserver +package txn import ( "errors" @@ -46,7 +46,8 @@ func BenchmarkWarnOfExpensiveRequestNoLog(b *testing.B) { Context: nil, } err := errors.New("benchmarking warn of expensive request") + lg := zaptest.NewLogger(b) for n := 0; n < b.N; n++ { - warnOfExpensiveRequest(zaptest.NewLogger(b), time.Second, time.Now(), nil, m, err) + WarnOfExpensiveRequest(lg, time.Second, time.Now(), nil, m, err) } } diff --git a/server/etcdserver/util.go b/server/etcdserver/util.go index 6ad5f0f4c6d..fbba5491b07 100644 --- a/server/etcdserver/util.go +++ b/server/etcdserver/util.go @@ -16,17 +16,11 @@ package etcdserver import ( "fmt" - "reflect" - "strings" "time" - "github.com/golang/protobuf/proto" - pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" - - "go.uber.org/zap" ) // isConnectedToQuorumSince checks whether the local member is connected to the @@ -103,82 +97,6 @@ func (nc *notifier) notify(err error) { close(nc.c) } -func warnOfExpensiveRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) { - if time.Since(now) <= warningApplyDuration { - return - } - var resp string - if !isNil(respMsg) { - resp = fmt.Sprintf("size:%d", proto.Size(respMsg)) - } - warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "", resp, err) -} - -func warnOfFailedRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) { - var resp string - if !isNil(respMsg) { - resp = fmt.Sprintf("size:%d", proto.Size(respMsg)) - } - d := time.Since(now) - lg.Warn( - "failed to apply request", - zap.Duration("took", d), - zap.String("request", reqStringer.String()), - zap.String("response", resp), - zap.Error(err), - ) -} - -func warnOfExpensiveReadOnlyTxnRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, r *pb.TxnRequest, txnResponse *pb.TxnResponse, err error) { - if time.Since(now) <= warningApplyDuration { - return - } - reqStringer := pb.NewLoggableTxnRequest(r) - var resp string - if !isNil(txnResponse) { - var resps []string - for _, r := range txnResponse.Responses { - switch op := r.Response.(type) { - case *pb.ResponseOp_ResponseRange: - resps = append(resps, fmt.Sprintf("range_response_count:%d", len(op.ResponseRange.Kvs))) - default: - // only range responses should be in a read only txn request - } - } - resp = fmt.Sprintf("responses:<%s> size:%d", strings.Join(resps, " "), txnResponse.Size()) - } - warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "read-only txn ", resp, err) -} - -func warnOfExpensiveReadOnlyRangeRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, rangeResponse *pb.RangeResponse, err error) { - if time.Since(now) <= warningApplyDuration { - return - } - var resp string - if !isNil(rangeResponse) { - resp = fmt.Sprintf("range_response_count:%d size:%d", len(rangeResponse.Kvs), rangeResponse.Size()) - } - warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "read-only range ", resp, err) -} - -// callers need make sure time has passed warningApplyDuration -func warnOfExpensiveGenericRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, prefix string, resp string, err error) { - lg.Warn( - "apply request took too long", - zap.Duration("took", time.Since(now)), 
- zap.Duration("expected-duration", warningApplyDuration), - zap.String("prefix", prefix), - zap.String("request", reqStringer.String()), - zap.String("response", resp), - zap.Error(err), - ) - slowApplies.Inc() -} - -func isNil(msg proto.Message) bool { - return msg == nil || reflect.ValueOf(msg).IsNil() -} - // panicAlternativeStringer wraps a fmt.Stringer, and if calling String() panics, calls the alternative instead. // This is needed to ensure logging slow v2 requests does not panic, which occurs when running integration tests // with the embedded server with github.com/golang/protobuf v1.4.0+. See https://github.com/etcd-io/etcd/issues/12197. diff --git a/server/etcdserver/v3_server.go b/server/etcdserver/v3_server.go index 1219f40d0ad..abf88be2d10 100644 --- a/server/etcdserver/v3_server.go +++ b/server/etcdserver/v3_server.go @@ -110,7 +110,7 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe var resp *pb.RangeResponse var err error defer func(start time.Time) { - warnOfExpensiveReadOnlyRangeRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, r, resp, err) + txn.WarnOfExpensiveReadOnlyRangeRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, r, resp, err) if resp != nil { trace.AddField( traceutil.Field{Key: "response_count", Value: len(resp.Kvs)}, @@ -177,7 +177,7 @@ func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse } defer func(start time.Time) { - warnOfExpensiveReadOnlyTxnRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, r, resp, err) + txn.WarnOfExpensiveReadOnlyTxnRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, r, resp, err) trace.LogIfLong(traceThreshold) }(time.Now()) From c78bf655a741b2028349dd7520e85ca6314cbb10 Mon Sep 17 00:00:00 2001 From: Piotr Tabor Date: Mon, 4 Apr 2022 20:33:46 +0200 Subject: [PATCH 10/18] Simplify imports and improve comments. 
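util.go keeps panicAlternativeStringer, whose body is not shown in this hunk; the recover-and-fallback pattern its comment describes looks roughly like the sketch below (a reconstruction under that description, not the verbatim etcd code):

package main

import "fmt"

// panicAlternativeStringer: if the wrapped String() panics, the named
// return is overwritten by the alternative inside the deferred recover.
type panicAlternativeStringer struct {
	stringer    fmt.Stringer
	alternative func() string
}

func (s panicAlternativeStringer) String() (out string) {
	defer func() {
		if recover() != nil {
			out = s.alternative()
		}
	}()
	return s.stringer.String()
}

type explosive struct{}

func (explosive) String() string { panic("marshal failure") }

func main() {
	s := panicAlternativeStringer{
		stringer:    explosive{},
		alternative: func() string { return "<request too large to log>" },
	}
	fmt.Println(s) // <request too large to log>
}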
--- server/etcdserver/api/v3rpc/header.go | 4 ++-- server/etcdserver/api/v3rpc/watch.go | 6 +++--- server/etcdserver/apply/uber_applier.go | 3 ++- server/etcdserver/server_test.go | 2 +- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/server/etcdserver/api/v3rpc/header.go b/server/etcdserver/api/v3rpc/header.go index 0ba83ab604d..a8f1f92cf99 100644 --- a/server/etcdserver/api/v3rpc/header.go +++ b/server/etcdserver/api/v3rpc/header.go @@ -17,13 +17,13 @@ package v3rpc import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/server/v3/etcdserver" - apply2 "go.etcd.io/etcd/server/v3/etcdserver/apply" + "go.etcd.io/etcd/server/v3/etcdserver/apply" ) type header struct { clusterID int64 memberID int64 - sg apply2.RaftStatusGetter + sg apply.RaftStatusGetter rev func() int64 } diff --git a/server/etcdserver/api/v3rpc/watch.go b/server/etcdserver/api/v3rpc/watch.go index 543921c0459..00b68cdbf0a 100644 --- a/server/etcdserver/api/v3rpc/watch.go +++ b/server/etcdserver/api/v3rpc/watch.go @@ -26,7 +26,7 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver" - apply2 "go.etcd.io/etcd/server/v3/etcdserver/apply" + "go.etcd.io/etcd/server/v3/etcdserver/apply" "go.etcd.io/etcd/server/v3/storage/mvcc" "go.uber.org/zap" @@ -42,7 +42,7 @@ type watchServer struct { maxRequestBytes int - sg apply2.RaftStatusGetter + sg apply.RaftStatusGetter watchable mvcc.WatchableKV ag AuthGetter } @@ -125,7 +125,7 @@ type serverWatchStream struct { maxRequestBytes int - sg apply2.RaftStatusGetter + sg apply.RaftStatusGetter watchable mvcc.WatchableKV ag AuthGetter diff --git a/server/etcdserver/apply/uber_applier.go b/server/etcdserver/apply/uber_applier.go index e30ef7e865d..df94902d152 100644 --- a/server/etcdserver/apply/uber_applier.go +++ b/server/etcdserver/apply/uber_applier.go @@ -104,6 +104,8 @@ func (a *UberApplier) RestoreAlarms() { } func (a *UberApplier) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *ApplyResult { + // We first execute the chain of WrapApply across all objects (e.g. CorruptApplier -> CappedApplier -> Auth -> Quota -> Backend), + // then dispatch(), then the individual method wrappers CorruptApplier.Put(CappedApplier.Put(... BackendApplier.Put())) return a.applyV3.WrapApply(context.TODO(), r, shouldApplyV3, a.dispatch) } @@ -139,7 +141,6 @@ func (a *UberApplier) dispatch(ctx context.Context, r *pb.InternalRaftRequest, s return nil } - // call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls switch { case r.Range != nil: op = "Range" diff --git a/server/etcdserver/server_test.go b/server/etcdserver/server_test.go index 78cd1b46bec..6e344b079f5 100644 --- a/server/etcdserver/server_test.go +++ b/server/etcdserver/server_test.go @@ -1606,7 +1606,7 @@ func TestUpdateVersion(t *testing.T) { srv := &EtcdServer{ lgMu: new(sync.RWMutex), lg: zaptest.NewLogger(t), - memberId: 1, + memberId: 1, Cfg: config.ServerConfig{Logger: zaptest.NewLogger(t), TickMs: 1, SnapshotCatchUpEntries: DefaultSnapshotCatchUpEntries}, r: *newRaftNode(raftNodeConfig{lg: zaptest.NewLogger(t), Node: n}), attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://node1.com"}}, From c62f01e5fe1328efe50c8ef4ca5d69de6f7eb59e Mon Sep 17 00:00:00 2001 From: Piotr Tabor Date: Tue, 5 Apr 2022 10:06:35 +0200 Subject: [PATCH 11/18] Move CheckTxnAuth to txn.
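The CheckTxnAuth being moved here (its new home in txn.go appears in the hunks below) walks three lists: every compare target, then every op on the success branch, then every op on the failure branch; a single denied key rejects the whole transaction before it executes. A toy distillation with a plain allow-list standing in for auth.AuthStore:

package main

import (
	"errors"
	"fmt"
)

var errPermissionDenied = errors.New("permission denied")

type op struct{ kind, key string }

type txnReq struct {
	compareKeys      []string
	success, failure []op
}

func checkOps(allowed map[string]bool, ops []op) error {
	for _, o := range ops {
		if !allowed[o.key] { // the real check dispatches on range/put/deleteRange
			return fmt.Errorf("%s %q: %w", o.kind, o.key, errPermissionDenied)
		}
	}
	return nil
}

// checkTxnAuth mirrors the shape of txn.CheckTxnAuth: compares first,
// then BOTH branches; any denial rejects the whole txn up front.
func checkTxnAuth(allowed map[string]bool, rt txnReq) error {
	for _, k := range rt.compareKeys {
		if !allowed[k] {
			return errPermissionDenied
		}
	}
	if err := checkOps(allowed, rt.success); err != nil {
		return err
	}
	return checkOps(allowed, rt.failure)
}

func main() {
	allowed := map[string]bool{"a": true}
	rt := txnReq{
		compareKeys: []string{"a"},
		success:     []op{{"put", "a"}},
		failure:     []op{{"range", "b"}}, // denied branch fails the whole txn
	}
	fmt.Println(checkTxnAuth(allowed, rt)) // range "b": permission denied
}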
--- server/etcdserver/apply/apply_auth.go | 15 +------ server/etcdserver/txn/txn.go | 56 +++++++++++++++++++++++++++ server/etcdserver/v3_server.go | 2 +- 3 files changed, 59 insertions(+), 14 deletions(-) diff --git a/server/etcdserver/apply/apply_auth.go b/server/etcdserver/apply/apply_auth.go index c42ec181fbc..947d82840e9 100644 --- a/server/etcdserver/apply/apply_auth.go +++ b/server/etcdserver/apply/apply_auth.go @@ -22,6 +22,7 @@ import ( "go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + "go.etcd.io/etcd/server/v3/etcdserver/txn" "go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/storage/mvcc" ) @@ -150,20 +151,8 @@ func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.Req return nil } -func CheckTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error { - for _, c := range rt.Compare { - if err := as.IsRangePermitted(ai, c.Key, c.RangeEnd); err != nil { - return err - } - } - if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil { - return err - } - return checkTxnReqsPermission(as, ai, rt.Failure) -} - func (aa *authApplierV3) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { - if err := CheckTxnAuth(aa.as, &aa.authInfo, rt); err != nil { + if err := txn.CheckTxnAuth(aa.as, &aa.authInfo, rt); err != nil { return nil, nil, err } return aa.applierV3.Txn(ctx, rt) diff --git a/server/etcdserver/txn/txn.go b/server/etcdserver/txn/txn.go index ab33ebd9b57..7e4d5a1ab75 100644 --- a/server/etcdserver/txn/txn.go +++ b/server/etcdserver/txn/txn.go @@ -22,6 +22,7 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/pkg/v3/traceutil" + "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" "go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/storage/mvcc" @@ -624,3 +625,58 @@ func IsTxnReadonly(r *pb.TxnRequest) bool { } return true } + +func CheckTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error { + for _, c := range rt.Compare { + if err := as.IsRangePermitted(ai, c.Key, c.RangeEnd); err != nil { + return err + } + } + if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil { + return err + } + return checkTxnReqsPermission(as, ai, rt.Failure) +} + +func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error { + for _, requ := range reqs { + switch tv := requ.Request.(type) { + case *pb.RequestOp_RequestRange: + if tv.RequestRange == nil { + continue + } + + if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil { + return err + } + + case *pb.RequestOp_RequestPut: + if tv.RequestPut == nil { + continue + } + + if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil { + return err + } + + case *pb.RequestOp_RequestDeleteRange: + if tv.RequestDeleteRange == nil { + continue + } + + if tv.RequestDeleteRange.PrevKv { + err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) + if err != nil { + return err + } + } + + err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/server/etcdserver/v3_server.go b/server/etcdserver/v3_server.go index abf88be2d10..59113f1b94b 100644 --- a/server/etcdserver/v3_server.go +++ b/server/etcdserver/v3_server.go @@ -173,7 +173,7 @@ func (s *EtcdServer) 
Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse var resp *pb.TxnResponse var err error chk := func(ai *auth.AuthInfo) error { - return apply2.CheckTxnAuth(s.authStore, ai, r) + return txn.CheckTxnAuth(s.authStore, ai, r) } defer func(start time.Time) { From 63b2f63cc1a2348409b7b6e11d2c5bfdadec69b6 Mon Sep 17 00:00:00 2001 From: Piotr Tabor Date: Thu, 5 May 2022 10:48:26 +0200 Subject: [PATCH 12/18] Rename package aliasing "apply2" -> apply. --- server/etcdserver/raft.go | 18 ++++----- server/etcdserver/raft_test.go | 8 ++-- server/etcdserver/server.go | 66 ++++++++++++++---------- server/etcdserver/server_test.go | 2 +- server/etcdserver/v3_server.go | 12 +++--- 5 files changed, 53 insertions(+), 53 deletions(-) diff --git a/server/etcdserver/raft.go b/server/etcdserver/raft.go index 69e6a8c216f..3db8a57c3b6 100644 --- a/server/etcdserver/raft.go +++ b/server/etcdserver/raft.go @@ -61,11 +61,11 @@ func init() { })) } -// apply contains entries, snapshot to be applied. Once -// an apply is consumed, the entries will be persisted to +// toApply contains entries, snapshot to be applied. Once +// a toApply is consumed, the entries will be persisted to // to raft storage concurrently; the application must read // raftDone before assuming the raft messages are stable. -type apply struct { +type toApply struct { entries []raftpb.Entry snapshot raftpb.Snapshot // notifyc synchronizes etcd server applies with the raft node @@ -82,7 +82,7 @@ type raftNode struct { msgSnapC chan raftpb.Message // a chan to send out apply - applyc chan apply + applyc chan toApply // a chan to send out readState readStateC chan raft.ReadState @@ -134,7 +134,7 @@ func newRaftNode(cfg raftNodeConfig) *raftNode { td: contention.NewTimeoutDetector(2 * cfg.heartbeat), readStateC: make(chan raft.ReadState, 1), msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap), - applyc: make(chan apply), + applyc: make(chan toApply), stopped: make(chan struct{}), done: make(chan struct{}), } @@ -201,7 +201,7 @@ func (r *raftNode) start(rh *raftReadyHandler) { } notifyc := make(chan struct{}, 1) - ap := apply{ + ap := toApply{ entries: rd.CommittedEntries, snapshot: rd.Snapshot, notifyc: notifyc, @@ -278,7 +278,7 @@ // changes to be applied before sending messages. // Otherwise we might incorrectly count votes (e.g. votes from removed members). // Also slow machine's follower raft-layer could proceed to become the leader - // on its own single-node cluster, before apply-layer applies the config change. + // on its own single-node cluster, before toApply-layer applies the config change. // We simply wait for ALL pending entries to be applied for now. // We might improve this later on if it causes unnecessary long blocking issues.
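The notifyc field above is the synchronization point between the raft goroutine, which persists the same batch to raft storage concurrently, and the apply path, which must not act on supposedly stable state (e.g. trigger a snapshot) until that write is done. A toy of the same handshake, with strings and a sleep standing in for entries and disk I/O:

package main

import (
	"fmt"
	"time"
)

// batch is a toy analogue of toApply: data plus a channel the producer
// signals once its concurrent disk write has completed.
type batch struct {
	entries []string
	notifyc chan struct{}
}

func main() {
	applyc := make(chan batch)

	// Producer: hand the entries to the applier, persist concurrently, notify.
	go func() {
		b := batch{entries: []string{"e1", "e2"}, notifyc: make(chan struct{}, 1)}
		applyc <- b
		time.Sleep(10 * time.Millisecond) // simulate the raft storage write
		b.notifyc <- struct{}{}
	}()

	b := <-applyc
	fmt.Println("applying", b.entries)
	<-b.notifyc // wait for the disk write before relying on stable state
	fmt.Println("entries stable; safe to trigger snapshot")
}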
waitApply := false @@ -314,7 +314,7 @@ func (r *raftNode) start(rh *raftReadyHandler) { }() } -func updateCommittedIndex(ap *apply, rh *raftReadyHandler) { +func updateCommittedIndex(ap *toApply, rh *raftReadyHandler) { var ci uint64 if len(ap.entries) != 0 { ci = ap.entries[len(ap.entries)-1].Index @@ -372,7 +372,7 @@ func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message { return ms } -func (r *raftNode) apply() chan apply { +func (r *raftNode) apply() chan toApply { return r.applyc } diff --git a/server/etcdserver/raft_test.go b/server/etcdserver/raft_test.go index f34548553df..6644d557c73 100644 --- a/server/etcdserver/raft_test.go +++ b/server/etcdserver/raft_test.go @@ -171,7 +171,7 @@ func TestStopRaftWhenWaitingForApplyDone(t *testing.T) { select { case <-srv.r.applyc: case <-time.After(time.Second): - t.Fatalf("failed to receive apply struct") + t.Fatalf("failed to receive toApply struct") } srv.r.stopped <- struct{}{} @@ -182,7 +182,7 @@ func TestStopRaftWhenWaitingForApplyDone(t *testing.T) { } } -// TestConfigChangeBlocksApply ensures apply blocks if committed entries contain config-change. +// TestConfigChangeBlocksApply ensures toApply blocks if committed entries contain config-change. func TestConfigChangeBlocksApply(t *testing.T) { n := newNopReadyNode() @@ -217,11 +217,11 @@ func TestConfigChangeBlocksApply(t *testing.T) { select { case <-continueC: - t.Fatalf("unexpected execution: raft routine should block waiting for apply") + t.Fatalf("unexpected execution: raft routine should block waiting for toApply") case <-time.After(time.Second): } - // finish apply, unblock raft routine + // finish toApply, unblock raft routine <-ap.notifyc select { diff --git a/server/etcdserver/server.go b/server/etcdserver/server.go index 5f0ddb69adb..4ddf983b66b 100644 --- a/server/etcdserver/server.go +++ b/server/etcdserver/server.go @@ -35,7 +35,7 @@ import ( "go.etcd.io/etcd/pkg/v3/notify" "go.etcd.io/etcd/pkg/v3/runtime" "go.etcd.io/etcd/server/v3/config" - apply2 "go.etcd.io/etcd/server/v3/etcdserver/apply" + "go.etcd.io/etcd/server/v3/etcdserver/apply" "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" "go.uber.org/zap" @@ -150,7 +150,7 @@ type ServerV2 interface { type ServerV3 interface { Server - apply2.RaftStatusGetter + apply.RaftStatusGetter } func (s *EtcdServer) ClientCertAuthEnabled() bool { return s.Cfg.ClientCertAuthEnabled } @@ -252,7 +252,7 @@ type EtcdServer struct { applyV2 ApplierV2 - uberApply *apply2.UberApplier + uberApply *apply.UberApplier applyWait wait.WaitTime @@ -727,7 +727,7 @@ type etcdProgress struct { // raftReadyHandler contains a set of EtcdServer operations to be called by raftNode, // and helps decouple state machine logic from Raft algorithms. 
-// TODO: add a state machine interface to apply the commit entries and do snapshot/recover +// TODO: add a state machine interface to toApply the commit entries and do snapshot/recover type raftReadyHandler struct { getLead func() (lead uint64) updateLead func(lead uint64) @@ -743,7 +743,7 @@ func (s *EtcdServer) run() { lg.Panic("failed to get snapshot from Raft storage", zap.Error(err)) } - // asynchronously accept apply packets, dispatch progress in-order + // asynchronously accept toApply packets, dispatch progress in-order sched := schedule.NewFIFOScheduler() var ( @@ -905,7 +905,7 @@ func (s *EtcdServer) Cleanup() { } } -func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) { +func (s *EtcdServer) applyAll(ep *etcdProgress, apply *toApply) { s.applySnapshot(ep, apply) s.applyEntries(ep, apply) @@ -914,7 +914,7 @@ func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) { // wait for the raft routine to finish the disk writes before triggering a // snapshot. or applied index might be greater than the last index in raft - // storage, since the raft routine might be slower than apply routine. + // storage, since the raft routine might be slower than toApply routine. <-apply.notifyc s.triggerSnapshot(ep) @@ -927,8 +927,8 @@ func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) { } } -func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { - if raft.IsEmptySnap(apply.snapshot) { +func (s *EtcdServer) applySnapshot(ep *etcdProgress, toApply *toApply) { + if raft.IsEmptySnap(toApply.snapshot) { return } applySnapshotInProgress.Inc() @@ -938,34 +938,34 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { "applying snapshot", zap.Uint64("current-snapshot-index", ep.snapi), zap.Uint64("current-applied-index", ep.appliedi), - zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index), - zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term), + zap.Uint64("incoming-leader-snapshot-index", toApply.snapshot.Metadata.Index), + zap.Uint64("incoming-leader-snapshot-term", toApply.snapshot.Metadata.Term), ) defer func() { lg.Info( "applied snapshot", zap.Uint64("current-snapshot-index", ep.snapi), zap.Uint64("current-applied-index", ep.appliedi), - zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index), - zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term), + zap.Uint64("incoming-leader-snapshot-index", toApply.snapshot.Metadata.Index), + zap.Uint64("incoming-leader-snapshot-term", toApply.snapshot.Metadata.Term), ) applySnapshotInProgress.Dec() }() - if apply.snapshot.Metadata.Index <= ep.appliedi { + if toApply.snapshot.Metadata.Index <= ep.appliedi { lg.Panic( "unexpected leader snapshot from outdated index", zap.Uint64("current-snapshot-index", ep.snapi), zap.Uint64("current-applied-index", ep.appliedi), - zap.Uint64("incoming-leader-snapshot-index", apply.snapshot.Metadata.Index), - zap.Uint64("incoming-leader-snapshot-term", apply.snapshot.Metadata.Term), + zap.Uint64("incoming-leader-snapshot-index", toApply.snapshot.Metadata.Index), + zap.Uint64("incoming-leader-snapshot-term", toApply.snapshot.Metadata.Term), ) } // wait for raftNode to persist snapshot onto the disk - <-apply.notifyc + <-toApply.notifyc - newbe, err := serverstorage.OpenSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot, s.beHooks) + newbe, err := serverstorage.OpenSnapshotBackend(s.Cfg, s.snapshotter, toApply.snapshot, s.beHooks) if err != nil { lg.Panic("failed to open snapshot backend", 
zap.Error(err)) } @@ -1033,7 +1033,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { } lg.Info("restoring v2 store") - if err := s.v2store.Recovery(apply.snapshot.Data); err != nil { + if err := s.v2store.Recovery(toApply.snapshot.Data); err != nil { lg.Panic("failed to restore v2 store", zap.Error(err)) } @@ -1067,18 +1067,18 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { lg.Info("added peers from new cluster configuration") - ep.appliedt = apply.snapshot.Metadata.Term - ep.appliedi = apply.snapshot.Metadata.Index + ep.appliedt = toApply.snapshot.Metadata.Term + ep.appliedi = toApply.snapshot.Metadata.Index ep.snapi = ep.appliedi - ep.confState = apply.snapshot.Metadata.ConfState + ep.confState = toApply.snapshot.Metadata.ConfState // As backends and implementations like alarmsStore changed, we need // to re-bootstrap Appliers. s.uberApply = s.NewUberApplier() } -func (s *EtcdServer) NewUberApplier() *apply2.UberApplier { - return apply2.NewUberApplier(s.lg, s.be, s.KV(), s.alarmStore, s.authStore, s.lessor, s.cluster, s, s, s.consistIndex, +func (s *EtcdServer) NewUberApplier() *apply.UberApplier { + return apply.NewUberApplier(s.lg, s.be, s.KV(), s.alarmStore, s.authStore, s.lessor, s.cluster, s, s, s.consistIndex, s.Cfg.WarningApplyDuration, s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer, s.Cfg.QuotaBackendBytes) } @@ -1090,7 +1090,7 @@ func verifySnapshotIndex(snapshot raftpb.Snapshot, cindex uint64) { }) } -func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) { +func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *toApply) { if len(apply.entries) == 0 { return } @@ -1277,7 +1277,7 @@ func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) err // Note that this permission check is done in the API layer, // so TOCTOU problem can be caused potentially in a schedule like this: - // update membership with user A -> revoke root role of A -> apply membership change + // update membership with user A -> revoke root role of A -> apply the membership change // in the state machine layer // However, both of membership change and role management requires the root privilege. // So careful operation by admins can prevent the problem. @@ -1469,7 +1469,7 @@ func (s *EtcdServer) mayPromoteMember(id types.ID) error { // check whether the learner catches up with leader or not. // Note: it will return nil if member is not found in cluster or if member is not learner. -// These two conditions will be checked before apply phase later. +// These two conditions will be checked before the apply phase later. func (s *EtcdServer) isLearnerReady(id uint64) error { rs := s.raftStatus() @@ -1774,7 +1774,7 @@ func (s *EtcdServer) sendMergedSnap(merged snap.Message) { }) } -// apply takes entries received from Raft (after it has been committed) and +// apply takes entries received from Raft (after they have been committed) and // applies them to the current state of the EtcdServer. // The given entries should not be empty. func (s *EtcdServer) apply( @@ -1795,7 +1795,7 @@ func (s *EtcdServer) apply( s.setTerm(e.Term) case raftpb.EntryConfChange: - // We need to apply all WAL entries on top of v2store + // We need to apply all the WAL entries on top of v2store // and only 'unapplied' (e.Index>backend.ConsistentIndex) on the backend.
shouldApplyV3 := membership.ApplyV2storeOnly @@ -1829,7 +1829,7 @@ func (s *EtcdServer) apply( func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { shouldApplyV3 := membership.ApplyV2storeOnly applyV3Performed := false - var ar *apply2.ApplyResult + var ar *apply.ApplyResult index := s.consistIndex.ConsistentIndex() if e.Index > index { // set the consistent index of current executing entry @@ -1843,7 +1843,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { } }() } - s.lg.Debug("apply entry normal", + s.lg.Debug("applying normal entry", zap.Uint64("consistent-index", index), zap.Uint64("entry-index", e.Index), zap.Bool("should-applyV3", bool(shouldApplyV3))) @@ -1895,7 +1895,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { ar = s.uberApply.Apply(&raftReq, shouldApplyV3) } - // do not re-apply applied entries. + // do not re-apply already applied entries. if !shouldApplyV3 { return } @@ -2039,7 +2039,7 @@ func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) { // KV().commit() updates the consistent index in backend. // All operations that update consistent index must be called sequentially // from applyAll function. - // So KV().Commit() cannot run in parallel with apply. It has to be called outside + // So KV().Commit() cannot run in parallel with the apply phase. It has to be called outside // the go routine created below. s.KV().Commit() diff --git a/server/etcdserver/server_test.go b/server/etcdserver/server_test.go index 6e344b079f5..fe770ec098b 100644 --- a/server/etcdserver/server_test.go +++ b/server/etcdserver/server_test.go @@ -723,7 +723,7 @@ func realisticRaftNode(lg *zap.Logger) *raftNode { return r } -// TestApplyMultiConfChangeShouldStop ensures that apply will return shouldStop +// TestApplyMultiConfChangeShouldStop ensures that apply() returns shouldStop // if the local member is removed along with other conf updates. func TestApplyMultiConfChangeShouldStop(t *testing.T) { lg := zaptest.NewLogger(t) diff --git a/server/etcdserver/v3_server.go b/server/etcdserver/v3_server.go index 59113f1b94b..da6542299c6 100644 --- a/server/etcdserver/v3_server.go +++ b/server/etcdserver/v3_server.go @@ -43,7 +43,7 @@ import ( const ( // In the health case, there might be a small gap (10s of entries) between // the applied index and committed index. - // However, if the committed entries are very heavy to apply, the gap might grow. + // However, if the committed entries are expensive to apply, the gap might grow. // We should stop accepting new proposals if the gap growing to a certain point. maxGapBetweenApplyAndCommitIndex = 5000 traceThreshold = 100 * time.Millisecond @@ -63,9 +63,9 @@ type RaftKV interface { } type Lessor interface { - // LeaseGrant sends LeaseGrant request to raft and apply it after committed. + // LeaseGrant sends a LeaseGrant request to raft and applies it after it is committed. LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) - // LeaseRevoke sends LeaseRevoke request to raft and apply it after committed. + // LeaseRevoke sends a LeaseRevoke request to raft and applies it after it is committed. LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) // LeaseRenew renews the lease with given ID. The renewed TTL is returned. Or an error @@ -223,7 +223,7 @@ func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.
s.bemu.RLock() s.be.ForceCommit() s.bemu.RUnlock() trace.Step("physically apply compaction") } if err != nil { return nil, err } @@ -617,9 +617,9 @@ func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftReque } if startTime, ok := ctx.Value(traceutil.StartTimeKey).(time.Time); ok && result.Trace != nil { applyStart := result.Trace.GetStartTime() - // The trace object is created in apply. Here reset the start time to trace + // The trace object is created in the apply phase. Here we reset the start time to trace // the raft request time by the difference between the request start time - // and apply start time + // and the apply start time result.Trace.SetStartTime(startTime) result.Trace.InsertStep(0, applyStart, "process raft request") result.Trace.LogIfLong(traceThreshold) From 5097b33ab9161634a31483aac323ef1bb23d0761 Mon Sep 17 00:00:00 2001 From: Piotr Tabor Date: Fri, 6 May 2022 13:42:02 +0200 Subject: [PATCH 13/18] Rename etcdserver/etcderrors package to etcdserver/errors. --- server/etcdmain/etcd.go | 4 +- server/etcdserver/api/etcdhttp/peer.go | 4 +- server/etcdserver/api/etcdhttp/utils.go | 6 +- server/etcdserver/api/v3rpc/maintenance.go | 4 +- server/etcdserver/api/v3rpc/util.go | 68 +++++++++---------- server/etcdserver/apply/apply.go | 14 ++-- server/etcdserver/apply/corrupt.go | 16 ++--- server/etcdserver/apply_v2.go | 4 +- server/etcdserver/bootstrap.go | 4 +- server/etcdserver/cluster_util.go | 10 +-- .../{etcderrors => errors}/errors.go | 2 +- server/etcdserver/server.go | 60 ++++++++-------- server/etcdserver/server_test.go | 22 +++--- server/etcdserver/txn/txn.go | 6 +- server/etcdserver/v2_server.go | 6 +- server/etcdserver/v3_server.go | 40 +++++------ tests/functional/tester/stresser_key.go | 6 +- 17 files changed, 138 insertions(+), 138 deletions(-) rename server/etcdserver/{etcderrors => errors}/errors.go (99%) diff --git a/server/etcdmain/etcd.go b/server/etcdmain/etcd.go index 2999fe13a6c..f35ebde6b55 100644 --- a/server/etcdmain/etcd.go +++ b/server/etcdmain/etcd.go @@ -26,7 +26,7 @@ import ( "go.etcd.io/etcd/pkg/v3/osutil" "go.etcd.io/etcd/server/v3/embed" "go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery" - "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" + "go.etcd.io/etcd/server/v3/etcdserver/errors" "go.uber.org/zap" "google.golang.org/grpc" ) @@ -126,7 +126,7 @@ func startEtcdOrProxyV2(args []string) { } if err != nil { - if derr, ok := err.(*etcderrors.DiscoveryError); ok { + if derr, ok := err.(*errors.DiscoveryError); ok { switch derr.Err { case v2discovery.ErrDuplicateID: lg.Warn( diff --git a/server/etcdserver/api/etcdhttp/peer.go b/server/etcdserver/api/etcdhttp/peer.go index 3def6bae3fe..a205eca65c4 100644 --- a/server/etcdserver/api/etcdhttp/peer.go +++ b/server/etcdserver/api/etcdhttp/peer.go @@ -26,7 +26,7 @@ import ( "go.etcd.io/etcd/server/v3/etcdserver/api" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp" - "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" + "go.etcd.io/etcd/server/v3/etcdserver/errors" "go.etcd.io/etcd/server/v3/lease/leasehttp" "go.uber.org/zap" @@ -143,7 +143,7 @@ func (h *peerMemberPromoteHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ http.Error(w, err.Error(), http.StatusNotFound) case membership.ErrMemberNotLearner: http.Error(w, err.Error(), http.StatusPreconditionFailed) - case etcderrors.ErrLearnerNotReady: + case errors.ErrLearnerNotReady: http.Error(w, err.Error(), http.StatusPreconditionFailed)
default: writeError(h.lg, w, r, err) diff --git a/server/etcdserver/api/etcdhttp/utils.go b/server/etcdserver/api/etcdhttp/utils.go index a1e8176db90..055e03da897 100644 --- a/server/etcdserver/api/etcdhttp/utils.go +++ b/server/etcdserver/api/etcdhttp/utils.go @@ -17,9 +17,9 @@ package etcdhttp import ( "net/http" - "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" httptypes "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/types" "go.etcd.io/etcd/server/v3/etcdserver/api/v2error" + "go.etcd.io/etcd/server/v3/etcdserver/errors" "go.uber.org/zap" ) @@ -57,8 +57,8 @@ func writeError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err erro default: switch err { - case etcderrors.ErrTimeoutDueToLeaderFail, etcderrors.ErrTimeoutDueToConnectionLost, etcderrors.ErrNotEnoughStartedMembers, - etcderrors.ErrUnhealthy: + case errors.ErrTimeoutDueToLeaderFail, errors.ErrTimeoutDueToConnectionLost, errors.ErrNotEnoughStartedMembers, + errors.ErrUnhealthy: if lg != nil { lg.Warn( "v2 response error", diff --git a/server/etcdserver/api/v3rpc/maintenance.go b/server/etcdserver/api/v3rpc/maintenance.go index 54a4106046c..bac521db645 100644 --- a/server/etcdserver/api/v3rpc/maintenance.go +++ b/server/etcdserver/api/v3rpc/maintenance.go @@ -28,7 +28,7 @@ import ( "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver" "go.etcd.io/etcd/server/v3/etcdserver/apply" - "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" + "go.etcd.io/etcd/server/v3/etcdserver/errors" serverversion "go.etcd.io/etcd/server/v3/etcdserver/version" "go.etcd.io/etcd/server/v3/storage/backend" "go.etcd.io/etcd/server/v3/storage/mvcc" @@ -243,7 +243,7 @@ func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) ( resp.StorageVersion = storageVersion.String() } if resp.Leader == raft.None { - resp.Errors = append(resp.Errors, etcderrors.ErrNoLeader.Error()) + resp.Errors = append(resp.Errors, errors.ErrNoLeader.Error()) } for _, a := range ms.a.Alarms() { resp.Errors = append(resp.Errors, a.String()) diff --git a/server/etcdserver/api/v3rpc/util.go b/server/etcdserver/api/v3rpc/util.go index 3d04d8cc40f..0fd607d6d61 100644 --- a/server/etcdserver/api/v3rpc/util.go +++ b/server/etcdserver/api/v3rpc/util.go @@ -22,7 +22,7 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" + "go.etcd.io/etcd/server/v3/etcdserver/errors" "go.etcd.io/etcd/server/v3/etcdserver/version" "go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/storage/mvcc" @@ -32,39 +32,39 @@ import ( ) var toGRPCErrorMap = map[error]error{ - membership.ErrIDRemoved: rpctypes.ErrGRPCMemberNotFound, - membership.ErrIDNotFound: rpctypes.ErrGRPCMemberNotFound, - membership.ErrIDExists: rpctypes.ErrGRPCMemberExist, - membership.ErrPeerURLexists: rpctypes.ErrGRPCPeerURLExist, - membership.ErrMemberNotLearner: rpctypes.ErrGRPCMemberNotLearner, - membership.ErrTooManyLearners: rpctypes.ErrGRPCTooManyLearners, - etcderrors.ErrNotEnoughStartedMembers: rpctypes.ErrMemberNotEnoughStarted, - etcderrors.ErrLearnerNotReady: rpctypes.ErrGRPCLearnerNotReady, - - mvcc.ErrCompacted: rpctypes.ErrGRPCCompacted, - mvcc.ErrFutureRev: rpctypes.ErrGRPCFutureRev, - etcderrors.ErrRequestTooLarge: rpctypes.ErrGRPCRequestTooLarge, - etcderrors.ErrNoSpace: rpctypes.ErrGRPCNoSpace, - etcderrors.ErrTooManyRequests: rpctypes.ErrTooManyRequests, - - etcderrors.ErrNoLeader: rpctypes.ErrGRPCNoLeader, - etcderrors.ErrNotLeader: 
rpctypes.ErrGRPCNotLeader, - etcderrors.ErrLeaderChanged: rpctypes.ErrGRPCLeaderChanged, - etcderrors.ErrStopped: rpctypes.ErrGRPCStopped, - etcderrors.ErrTimeout: rpctypes.ErrGRPCTimeout, - etcderrors.ErrTimeoutDueToLeaderFail: rpctypes.ErrGRPCTimeoutDueToLeaderFail, - etcderrors.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost, - etcderrors.ErrTimeoutWaitAppliedIndex: rpctypes.ErrGRPCTimeoutWaitAppliedIndex, - etcderrors.ErrUnhealthy: rpctypes.ErrGRPCUnhealthy, - etcderrors.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound, - etcderrors.ErrCorrupt: rpctypes.ErrGRPCCorrupt, - etcderrors.ErrBadLeaderTransferee: rpctypes.ErrGRPCBadLeaderTransferee, - - etcderrors.ErrClusterVersionUnavailable: rpctypes.ErrGRPCClusterVersionUnavailable, - etcderrors.ErrWrongDowngradeVersionFormat: rpctypes.ErrGRPCWrongDowngradeVersionFormat, - version.ErrInvalidDowngradeTargetVersion: rpctypes.ErrGRPCInvalidDowngradeTargetVersion, - version.ErrDowngradeInProcess: rpctypes.ErrGRPCDowngradeInProcess, - version.ErrNoInflightDowngrade: rpctypes.ErrGRPCNoInflightDowngrade, + membership.ErrIDRemoved: rpctypes.ErrGRPCMemberNotFound, + membership.ErrIDNotFound: rpctypes.ErrGRPCMemberNotFound, + membership.ErrIDExists: rpctypes.ErrGRPCMemberExist, + membership.ErrPeerURLexists: rpctypes.ErrGRPCPeerURLExist, + membership.ErrMemberNotLearner: rpctypes.ErrGRPCMemberNotLearner, + membership.ErrTooManyLearners: rpctypes.ErrGRPCTooManyLearners, + errors.ErrNotEnoughStartedMembers: rpctypes.ErrMemberNotEnoughStarted, + errors.ErrLearnerNotReady: rpctypes.ErrGRPCLearnerNotReady, + + mvcc.ErrCompacted: rpctypes.ErrGRPCCompacted, + mvcc.ErrFutureRev: rpctypes.ErrGRPCFutureRev, + errors.ErrRequestTooLarge: rpctypes.ErrGRPCRequestTooLarge, + errors.ErrNoSpace: rpctypes.ErrGRPCNoSpace, + errors.ErrTooManyRequests: rpctypes.ErrTooManyRequests, + + errors.ErrNoLeader: rpctypes.ErrGRPCNoLeader, + errors.ErrNotLeader: rpctypes.ErrGRPCNotLeader, + errors.ErrLeaderChanged: rpctypes.ErrGRPCLeaderChanged, + errors.ErrStopped: rpctypes.ErrGRPCStopped, + errors.ErrTimeout: rpctypes.ErrGRPCTimeout, + errors.ErrTimeoutDueToLeaderFail: rpctypes.ErrGRPCTimeoutDueToLeaderFail, + errors.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost, + errors.ErrTimeoutWaitAppliedIndex: rpctypes.ErrGRPCTimeoutWaitAppliedIndex, + errors.ErrUnhealthy: rpctypes.ErrGRPCUnhealthy, + errors.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound, + errors.ErrCorrupt: rpctypes.ErrGRPCCorrupt, + errors.ErrBadLeaderTransferee: rpctypes.ErrGRPCBadLeaderTransferee, + + errors.ErrClusterVersionUnavailable: rpctypes.ErrGRPCClusterVersionUnavailable, + errors.ErrWrongDowngradeVersionFormat: rpctypes.ErrGRPCWrongDowngradeVersionFormat, + version.ErrInvalidDowngradeTargetVersion: rpctypes.ErrGRPCInvalidDowngradeTargetVersion, + version.ErrDowngradeInProcess: rpctypes.ErrGRPCDowngradeInProcess, + version.ErrNoInflightDowngrade: rpctypes.ErrGRPCNoInflightDowngrade, lease.ErrLeaseNotFound: rpctypes.ErrGRPCLeaseNotFound, lease.ErrLeaseExists: rpctypes.ErrGRPCLeaseExist, diff --git a/server/etcdserver/apply/apply.go b/server/etcdserver/apply/apply.go index 623f9a5b550..849e0ddc4e1 100644 --- a/server/etcdserver/apply/apply.go +++ b/server/etcdserver/apply/apply.go @@ -27,7 +27,7 @@ import ( "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm" "go.etcd.io/etcd/server/v3/etcdserver/cindex" - "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" + "go.etcd.io/etcd/server/v3/etcdserver/errors" mvcc_txn 
"go.etcd.io/etcd/server/v3/etcdserver/txn" "go.etcd.io/etcd/server/v3/etcdserver/version" "go.etcd.io/etcd/server/v3/lease" @@ -252,18 +252,18 @@ type applierV3Capped struct { func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} } func (a *applierV3Capped) Put(_ context.Context, _ mvcc.TxnWrite, _ *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { - return nil, nil, etcderrors.ErrNoSpace + return nil, nil, errors.ErrNoSpace } func (a *applierV3Capped) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { if a.q.Cost(r) > 0 { - return nil, nil, etcderrors.ErrNoSpace + return nil, nil, errors.ErrNoSpace } return a.applierV3.Txn(ctx, r) } func (a *applierV3Capped) LeaseGrant(_ *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - return nil, etcderrors.ErrNoSpace + return nil, errors.ErrNoSpace } func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) { @@ -447,7 +447,7 @@ func (a *quotaApplierV3) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRe ok := a.q.Available(p) resp, trace, err := a.applierV3.Put(ctx, txn, p) if err == nil && !ok { - err = etcderrors.ErrNoSpace + err = errors.ErrNoSpace } return resp, trace, err } @@ -456,7 +456,7 @@ func (a *quotaApplierV3) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnRes ok := a.q.Available(rt) resp, trace, err := a.applierV3.Txn(ctx, rt) if err == nil && !ok { - err = etcderrors.ErrNoSpace + err = errors.ErrNoSpace } return resp, trace, err } @@ -465,7 +465,7 @@ func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantRes ok := a.q.Available(lc) resp, err := a.applierV3.LeaseGrant(lc) if err == nil && !ok { - err = etcderrors.ErrNoSpace + err = errors.ErrNoSpace } return resp, err } diff --git a/server/etcdserver/apply/corrupt.go b/server/etcdserver/apply/corrupt.go index 32620cde27b..d8353514e09 100644 --- a/server/etcdserver/apply/corrupt.go +++ b/server/etcdserver/apply/corrupt.go @@ -19,7 +19,7 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/pkg/v3/traceutil" - "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" + "go.etcd.io/etcd/server/v3/etcdserver/errors" "go.etcd.io/etcd/server/v3/storage/mvcc" ) @@ -30,29 +30,29 @@ type applierV3Corrupt struct { func newApplierV3Corrupt(a applierV3) *applierV3Corrupt { return &applierV3Corrupt{a} } func (a *applierV3Corrupt) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) { - return nil, nil, etcderrors.ErrCorrupt + return nil, nil, errors.ErrCorrupt } func (a *applierV3Corrupt) Range(ctx context.Context, txn mvcc.TxnRead, p *pb.RangeRequest) (*pb.RangeResponse, error) { - return nil, etcderrors.ErrCorrupt + return nil, errors.ErrCorrupt } func (a *applierV3Corrupt) DeleteRange(txn mvcc.TxnWrite, p *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - return nil, etcderrors.ErrCorrupt + return nil, errors.ErrCorrupt } func (a *applierV3Corrupt) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) { - return nil, nil, etcderrors.ErrCorrupt + return nil, nil, errors.ErrCorrupt } func (a *applierV3Corrupt) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) { - return nil, nil, nil, etcderrors.ErrCorrupt + return nil, nil, nil, errors.ErrCorrupt } func (a *applierV3Corrupt) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - return nil, etcderrors.ErrCorrupt + return nil, 
errors.ErrCorrupt } func (a *applierV3Corrupt) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - return nil, etcderrors.ErrCorrupt + return nil, errors.ErrCorrupt } diff --git a/server/etcdserver/apply_v2.go b/server/etcdserver/apply_v2.go index 75635092185..c83c6219304 100644 --- a/server/etcdserver/apply_v2.go +++ b/server/etcdserver/apply_v2.go @@ -26,7 +26,7 @@ import ( "go.etcd.io/etcd/server/v3/etcdserver/api" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" + "go.etcd.io/etcd/server/v3/etcdserver/errors" "go.etcd.io/etcd/server/v3/etcdserver/txn" "go.uber.org/zap" @@ -147,7 +147,7 @@ func (s *EtcdServer) applyV2Request(r *RequestV2, shouldApplyV3 membership.Shoul return s.applyV2.Sync(r) default: // This should never be reached, but just in case: - return Response{Err: etcderrors.ErrUnknownMethod} + return Response{Err: errors.ErrUnknownMethod} } } diff --git a/server/etcdserver/bootstrap.go b/server/etcdserver/bootstrap.go index f12670f15be..a1704292bcf 100644 --- a/server/etcdserver/bootstrap.go +++ b/server/etcdserver/bootstrap.go @@ -25,7 +25,7 @@ import ( "github.com/coreos/go-semver/semver" "github.com/dustin/go-humanize" - "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" + "go.etcd.io/etcd/server/v3/etcdserver/errors" "go.uber.org/zap" "go.etcd.io/etcd/api/v3/etcdserverpb" @@ -338,7 +338,7 @@ func bootstrapNewClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper) (* str, err = v3discovery.JoinCluster(cfg.Logger, &cfg.DiscoveryCfg, m.ID, cfg.InitialPeerURLsMap.String()) } if err != nil { - return nil, &etcderrors.DiscoveryError{Op: "join", Err: err} + return nil, &errors.DiscoveryError{Op: "join", Err: err} } var urlsmap types.URLsMap urlsmap, err = types.NewURLsMap(str) diff --git a/server/etcdserver/cluster_util.go b/server/etcdserver/cluster_util.go index d96e8548bab..93e7e350caa 100644 --- a/server/etcdserver/cluster_util.go +++ b/server/etcdserver/cluster_util.go @@ -28,7 +28,7 @@ import ( "go.etcd.io/etcd/api/v3/version" "go.etcd.io/etcd/client/pkg/v3/types" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" - "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" + "go.etcd.io/etcd/server/v3/etcdserver/errors" "github.com/coreos/go-semver/semver" "go.uber.org/zap" @@ -305,12 +305,12 @@ func promoteMemberHTTP(ctx context.Context, url string, id uint64, peerRt http.R } if resp.StatusCode == http.StatusRequestTimeout { - return nil, etcderrors.ErrTimeout + return nil, errors.ErrTimeout } if resp.StatusCode == http.StatusPreconditionFailed { // both ErrMemberNotLearner and ErrLearnerNotReady have same http status code - if strings.Contains(string(b), etcderrors.ErrLearnerNotReady.Error()) { - return nil, etcderrors.ErrLearnerNotReady + if strings.Contains(string(b), errors.ErrLearnerNotReady.Error()) { + return nil, errors.ErrLearnerNotReady } if strings.Contains(string(b), membership.ErrMemberNotLearner.Error()) { return nil, membership.ErrMemberNotLearner @@ -409,7 +409,7 @@ func convertToClusterVersion(v string) (*semver.Version, error) { // allow input version format Major.Minor ver, err = semver.NewVersion(v + ".0") if err != nil { - return nil, etcderrors.ErrWrongDowngradeVersionFormat + return nil, errors.ErrWrongDowngradeVersionFormat } } // cluster version only keeps major.minor, remove patch version diff --git a/server/etcdserver/etcderrors/errors.go b/server/etcdserver/errors/errors.go similarity index 99% rename from 
server/etcdserver/etcderrors/errors.go rename to server/etcdserver/errors/errors.go index 80102311811..8de698a1df3 100644 --- a/server/etcdserver/etcderrors/errors.go +++ b/server/etcdserver/errors/errors.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package etcderrors +package errors import ( "errors" diff --git a/server/etcdserver/server.go b/server/etcdserver/server.go index 4ddf983b66b..be96f326b60 100644 --- a/server/etcdserver/server.go +++ b/server/etcdserver/server.go @@ -36,7 +36,7 @@ import ( "go.etcd.io/etcd/pkg/v3/runtime" "go.etcd.io/etcd/server/v3/config" "go.etcd.io/etcd/server/v3/etcdserver/apply" - "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" + "go.etcd.io/etcd/server/v3/etcdserver/errors" "go.uber.org/zap" pb "go.etcd.io/etcd/api/v3/etcdserverpb" @@ -976,7 +976,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, toApply *toApply) { // Eventually the new consistent_index value coming from snapshot is overwritten // by the old value. s.consistIndex.SetBackend(newbe) - verifySnapshotIndex(apply.snapshot, s.consistIndex.ConsistentIndex()) + verifySnapshotIndex(toApply.snapshot, s.consistIndex.ConsistentIndex()) // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. @@ -1154,7 +1154,7 @@ func (s *EtcdServer) isLeader() bool { // MoveLeader transfers the leader to the given transferee. func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) error { if !s.cluster.IsMemberExist(types.ID(transferee)) || s.cluster.Member(types.ID(transferee)).IsLearner { - return etcderrors.ErrBadLeaderTransferee + return errors.ErrBadLeaderTransferee } now := time.Now() @@ -1172,7 +1172,7 @@ func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) er for s.Lead() != transferee { select { case <-ctx.Done(): // time out - return etcderrors.ErrTimeoutLeaderTransfer + return errors.ErrTimeoutLeaderTransfer case <-time.After(interval): } } @@ -1211,7 +1211,7 @@ func (s *EtcdServer) TransferLeadership() error { transferee, ok := longestConnected(s.r.transport, s.cluster.VotingMemberIDs()) if !ok { - return etcderrors.ErrUnhealthy + return errors.ErrUnhealthy } tm := s.Cfg.ReqTimeout() @@ -1330,9 +1330,9 @@ func (s *EtcdServer) mayAddMember(memb membership.Member) error { "rejecting member add request; not enough healthy members", zap.String("local-member-id", s.MemberId().String()), zap.String("requested-member-add", fmt.Sprintf("%+v", memb)), - zap.Error(etcderrors.ErrNotEnoughStartedMembers), + zap.Error(errors.ErrNotEnoughStartedMembers), ) - return etcderrors.ErrNotEnoughStartedMembers + return errors.ErrNotEnoughStartedMembers } if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.MemberId(), s.cluster.VotingMembers()) { @@ -1340,9 +1340,9 @@ func (s *EtcdServer) mayAddMember(memb membership.Member) error { "rejecting member add request; local member has not been connected to all peers, reconfigure breaks active quorum", zap.String("local-member-id", s.MemberId().String()), zap.String("requested-member-add", fmt.Sprintf("%+v", memb)), - zap.Error(etcderrors.ErrUnhealthy), + zap.Error(errors.ErrUnhealthy), ) - return etcderrors.ErrUnhealthy + return errors.ErrUnhealthy } return nil @@ -1375,7 +1375,7 @@ func (s *EtcdServer) PromoteMember(ctx context.Context, id uint64) ([]*membershi 
learnerPromoteSucceed.Inc() return resp, nil } - if err != etcderrors.ErrNotLeader { + if err != errors.ErrNotLeader { learnerPromoteFailed.WithLabelValues(err.Error()).Inc() return resp, err } @@ -1394,16 +1394,16 @@ func (s *EtcdServer) PromoteMember(ctx context.Context, id uint64) ([]*membershi return resp, nil } // If member promotion failed, return early. Otherwise keep retry. - if err == etcderrors.ErrLearnerNotReady || err == membership.ErrIDNotFound || err == membership.ErrMemberNotLearner { + if err == errors.ErrLearnerNotReady || err == membership.ErrIDNotFound || err == membership.ErrMemberNotLearner { return nil, err } } } if cctx.Err() == context.DeadlineExceeded { - return nil, etcderrors.ErrTimeout + return nil, errors.ErrTimeout } - return nil, etcderrors.ErrCanceled + return nil, errors.ErrCanceled } // promoteMember checks whether the to-be-promoted learner node is ready before sending the promote @@ -1459,9 +1459,9 @@ func (s *EtcdServer) mayPromoteMember(id types.ID) error { "rejecting member promote request; not enough healthy members", zap.String("local-member-id", s.MemberId().String()), zap.String("requested-member-remove-id", id.String()), - zap.Error(etcderrors.ErrNotEnoughStartedMembers), + zap.Error(errors.ErrNotEnoughStartedMembers), ) - return etcderrors.ErrNotEnoughStartedMembers + return errors.ErrNotEnoughStartedMembers } return nil @@ -1475,7 +1475,7 @@ func (s *EtcdServer) isLearnerReady(id uint64) error { // leader's raftStatus.Progress is not nil if rs.Progress == nil { - return etcderrors.ErrNotLeader + return errors.ErrNotLeader } var learnerMatch uint64 @@ -1494,7 +1494,7 @@ func (s *EtcdServer) isLearnerReady(id uint64) error { leaderMatch := rs.Progress[leaderID].Match // the learner's Match not caught up with leader yet if float64(learnerMatch) < float64(leaderMatch)*readyPercent { - return etcderrors.ErrLearnerNotReady + return errors.ErrLearnerNotReady } } @@ -1518,9 +1518,9 @@ func (s *EtcdServer) mayRemoveMember(id types.ID) error { "rejecting member remove request; not enough healthy members", zap.String("local-member-id", s.MemberId().String()), zap.String("requested-member-remove-id", id.String()), - zap.Error(etcderrors.ErrNotEnoughStartedMembers), + zap.Error(errors.ErrNotEnoughStartedMembers), ) - return etcderrors.ErrNotEnoughStartedMembers + return errors.ErrNotEnoughStartedMembers } // downed member is safe to remove since it's not part of the active quorum @@ -1537,9 +1537,9 @@ func (s *EtcdServer) mayRemoveMember(id types.ID) error { zap.String("local-member-id", s.MemberId().String()), zap.String("requested-member-remove", id.String()), zap.Int("active-peers", active), - zap.Error(etcderrors.ErrUnhealthy), + zap.Error(errors.ErrUnhealthy), ) - return etcderrors.ErrUnhealthy + return errors.ErrUnhealthy } return nil @@ -1656,7 +1656,7 @@ func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*me return nil, s.parseProposeCtxErr(ctx.Err(), start) case <-s.stopping: - return nil, etcderrors.ErrStopped + return nil, errors.ErrStopped } } @@ -1904,7 +1904,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { return } - if ar.Err != etcderrors.ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 { + if ar.Err != errors.ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 { s.w.Trigger(id, ar) return } @@ -2209,7 +2209,7 @@ func (s *EtcdServer) updateClusterVersionV2(ver string) { lg.Info("cluster version is updated", zap.String("cluster-version", version.Cluster(ver))) return - case 
etcderrors.ErrStopped: + case errors.ErrStopped: lg.Warn("aborting cluster version update; server is stopped", zap.Error(err)) return @@ -2245,7 +2245,7 @@ func (s *EtcdServer) updateClusterVersionV3(ver string) { lg.Info("cluster version is updated", zap.String("cluster-version", version.Cluster(ver))) return - case etcderrors.ErrStopped: + case errors.ErrStopped: lg.Warn("aborting cluster version update; server is stopped", zap.Error(err)) return @@ -2278,7 +2278,7 @@ func (s *EtcdServer) monitorDowngrade() { func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { switch err { case context.Canceled: - return etcderrors.ErrCanceled + return errors.ErrCanceled case context.DeadlineExceeded: s.leadTimeMu.RLock() @@ -2286,7 +2286,7 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { s.leadTimeMu.RUnlock() prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond) if start.After(prevLeadLost) && start.Before(curLeadElected) { - return etcderrors.ErrTimeoutDueToLeaderFail + return errors.ErrTimeoutDueToLeaderFail } lead := types.ID(s.getLead()) switch lead { @@ -2294,14 +2294,14 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { // TODO: return error to specify it happens because the cluster does not have leader now case s.MemberId(): if !isConnectedToQuorumSince(s.r.transport, start, s.MemberId(), s.cluster.Members()) { - return etcderrors.ErrTimeoutDueToConnectionLost + return errors.ErrTimeoutDueToConnectionLost } default: if !isConnectedSince(s.r.transport, start, lead) { - return etcderrors.ErrTimeoutDueToConnectionLost + return errors.ErrTimeoutDueToConnectionLost } } - return etcderrors.ErrTimeout + return errors.ErrTimeout default: return err diff --git a/server/etcdserver/server_test.go b/server/etcdserver/server_test.go index fe770ec098b..cb86eb4a8b7 100644 --- a/server/etcdserver/server_test.go +++ b/server/etcdserver/server_test.go @@ -48,7 +48,7 @@ import ( "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" apply2 "go.etcd.io/etcd/server/v3/etcdserver/apply" "go.etcd.io/etcd/server/v3/etcdserver/cindex" - "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" + "go.etcd.io/etcd/server/v3/etcdserver/errors" "go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/mock/mockstorage" "go.etcd.io/etcd/server/v3/mock/mockstore" @@ -97,7 +97,7 @@ func TestDoLocalAction(t *testing.T) { }, { pb.Request{Method: "BADMETHOD", ID: 1}, - Response{}, etcderrors.ErrUnknownMethod, []testutil.Action{}, + Response{}, errors.ErrUnknownMethod, []testutil.Action{}, }, } for i, tt := range tests { @@ -463,7 +463,7 @@ func TestApplyRequest(t *testing.T) { // Unknown method - error { pb.Request{Method: "BADMETHOD", ID: 1}, - Response{Err: etcderrors.ErrUnknownMethod}, + Response{Err: errors.ErrUnknownMethod}, []testutil.Action{}, }, } @@ -830,8 +830,8 @@ func TestDoProposalCancelled(t *testing.T) { cancel() _, err := srv.Do(ctx, pb.Request{Method: "PUT"}) - if err != etcderrors.ErrCanceled { - t.Fatalf("err = %v, want %v", err, etcderrors.ErrCanceled) + if err != errors.ErrCanceled { + t.Fatalf("err = %v, want %v", err, errors.ErrCanceled) } w := []testutil.Action{{Name: "Register"}, {Name: "Trigger"}} if !reflect.DeepEqual(wt.Action(), w) { @@ -853,8 +853,8 @@ func TestDoProposalTimeout(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 0) _, err := srv.Do(ctx, pb.Request{Method: "PUT"}) cancel() - if err != etcderrors.ErrTimeout { - 
t.Fatalf("err = %v, want %v", err, etcderrors.ErrTimeout) + if err != errors.ErrTimeout { + t.Fatalf("err = %v, want %v", err, errors.ErrTimeout) } } @@ -872,8 +872,8 @@ func TestDoProposalStopped(t *testing.T) { srv.stopping = make(chan struct{}) close(srv.stopping) _, err := srv.Do(context.Background(), pb.Request{Method: "PUT", ID: 1}) - if err != etcderrors.ErrStopped { - t.Errorf("err = %v, want %v", err, etcderrors.ErrStopped) + if err != errors.ErrStopped { + t.Errorf("err = %v, want %v", err, errors.ErrStopped) } } @@ -1944,14 +1944,14 @@ func TestWaitAppliedIndex(t *testing.T) { action: func(s *EtcdServer) { s.stopping <- struct{}{} }, - ExpectedError: etcderrors.ErrStopped, + ExpectedError: errors.ErrStopped, }, { name: "Timed out waiting for the applied index", appliedIndex: 10, committedIndex: 12, action: nil, - ExpectedError: etcderrors.ErrTimeoutWaitAppliedIndex, + ExpectedError: errors.ErrTimeoutWaitAppliedIndex, }, } for _, tc := range cases { diff --git a/server/etcdserver/txn/txn.go b/server/etcdserver/txn/txn.go index 7e4d5a1ab75..b3b5f2c4e20 100644 --- a/server/etcdserver/txn/txn.go +++ b/server/etcdserver/txn/txn.go @@ -23,7 +23,7 @@ import ( "go.etcd.io/etcd/api/v3/mvccpb" "go.etcd.io/etcd/pkg/v3/traceutil" "go.etcd.io/etcd/server/v3/auth" - "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" + "go.etcd.io/etcd/server/v3/etcdserver/errors" "go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/storage/mvcc" "go.uber.org/zap" @@ -65,7 +65,7 @@ func Put(ctx context.Context, lg *zap.Logger, lessor lease.Lessor, kv mvcc.KV, t if p.IgnoreValue || p.IgnoreLease { if rr == nil || len(rr.KVs) == 0 { // ignore_{lease,value} flag expects previous key-value pair - return nil, nil, etcderrors.ErrKeyNotFound + return nil, nil, errors.ErrKeyNotFound } } if p.IgnoreValue { @@ -378,7 +378,7 @@ func checkRequestPut(rv mvcc.ReadView, lessor lease.Lessor, reqOp *pb.RequestOp) return err } if rr == nil || len(rr.KVs) == 0 { - return etcderrors.ErrKeyNotFound + return errors.ErrKeyNotFound } } if lease.LeaseID(req.Lease) != lease.NoLease { diff --git a/server/etcdserver/v2_server.go b/server/etcdserver/v2_server.go index 22709762ee2..517d7ca7f70 100644 --- a/server/etcdserver/v2_server.go +++ b/server/etcdserver/v2_server.go @@ -21,7 +21,7 @@ import ( pb "go.etcd.io/etcd/api/v3/etcdserverpb" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" "go.etcd.io/etcd/server/v3/etcdserver/api/v2store" - "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" + "go.etcd.io/etcd/server/v3/etcdserver/errors" ) type RequestV2 pb.Request @@ -117,7 +117,7 @@ func (a *reqV2HandlerEtcdServer) processRaftRequest(ctx context.Context, r *Requ return Response{}, a.s.parseProposeCtxErr(ctx.Err(), start) case <-a.s.stopping: } - return Response{}, etcderrors.ErrStopped + return Response{}, errors.ErrStopped } func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) { @@ -158,7 +158,7 @@ func (r *RequestV2) Handle(ctx context.Context, v2api RequestV2Handler) (Respons case "HEAD": return v2api.Head(ctx, r) } - return Response{}, etcderrors.ErrUnknownMethod + return Response{}, errors.ErrUnknownMethod } func (r *RequestV2) String() string { diff --git a/server/etcdserver/v3_server.go b/server/etcdserver/v3_server.go index da6542299c6..e5c26727968 100644 --- a/server/etcdserver/v3_server.go +++ b/server/etcdserver/v3_server.go @@ -29,7 +29,7 @@ import ( "go.etcd.io/etcd/server/v3/auth" "go.etcd.io/etcd/server/v3/etcdserver/api/membership" apply2 "go.etcd.io/etcd/server/v3/etcdserver/apply" - 
"go.etcd.io/etcd/server/v3/etcdserver/etcderrors" + "go.etcd.io/etcd/server/v3/etcdserver/errors" "go.etcd.io/etcd/server/v3/etcdserver/txn" "go.etcd.io/etcd/server/v3/lease" "go.etcd.io/etcd/server/v3/lease/leasehttp" @@ -260,9 +260,9 @@ func (s *EtcdServer) waitAppliedIndex() error { select { case <-s.ApplyWait(): case <-s.stopping: - return etcderrors.ErrStopped + return errors.ErrStopped case <-time.After(applyTimeout): - return etcderrors.ErrTimeoutWaitAppliedIndex + return errors.ErrTimeoutWaitAppliedIndex } return nil @@ -312,9 +312,9 @@ func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, e } if cctx.Err() == context.DeadlineExceeded { - return -1, etcderrors.ErrTimeout + return -1, errors.ErrTimeout } - return -1, etcderrors.ErrCanceled + return -1, errors.ErrCanceled } func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { @@ -362,9 +362,9 @@ func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveR } if cctx.Err() == context.DeadlineExceeded { - return nil, etcderrors.ErrTimeout + return nil, errors.ErrTimeout } - return nil, etcderrors.ErrCanceled + return nil, errors.ErrCanceled } func (s *EtcdServer) newHeader() *pb.ResponseHeader { @@ -395,13 +395,13 @@ func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) case <-time.After(dur): leader = s.cluster.Member(s.Leader()) case <-s.stopping: - return nil, etcderrors.ErrStopped + return nil, errors.ErrStopped case <-ctx.Done(): - return nil, etcderrors.ErrNoLeader + return nil, errors.ErrNoLeader } } if len(leader.PeerURLs) == 0 { - return nil, etcderrors.ErrNoLeader + return nil, errors.ErrNoLeader } return leader, nil } @@ -660,7 +660,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In ai := s.getAppliedIndex() ci := s.getCommittedIndex() if ci > ai+maxGapBetweenApplyAndCommitIndex { - return nil, etcderrors.ErrTooManyRequests + return nil, errors.ErrTooManyRequests } r.Header = &pb.RequestHeader{ @@ -685,7 +685,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In } if len(data) > int(s.Cfg.MaxRequestBytes) { - return nil, etcderrors.ErrRequestTooLarge + return nil, errors.ErrRequestTooLarge } id := r.ID @@ -715,7 +715,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In s.w.Trigger(id, nil) // GC wait return nil, s.parseProposeCtxErr(cctx.Err(), start) case <-s.done: - return nil, etcderrors.ErrStopped + return nil, errors.ErrStopped } } @@ -776,7 +776,7 @@ func (s *EtcdServer) linearizableReadLoop() { } func isStopped(err error) bool { - return err == raft.ErrStopped || err == etcderrors.ErrStopped + return err == raft.ErrStopped || err == errors.ErrStopped } func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, requestId uint64) (uint64, error) { @@ -817,7 +817,7 @@ func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, case <-leaderChangedNotifier: readIndexFailed.Inc() // return a retryable error. 
- return 0, etcderrors.ErrLeaderChanged + return 0, errors.ErrLeaderChanged case <-firstCommitInTermNotifier: firstCommitInTermNotifier = s.firstCommitInTerm.Receive() lg.Info("first commit in current term: resending ReadIndex request") @@ -845,9 +845,9 @@ func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, zap.Duration("timeout", s.Cfg.ReqTimeout()), ) slowReadIndex.Inc() - return 0, etcderrors.ErrTimeout + return 0, errors.ErrTimeout case <-s.stopping: - return 0, etcderrors.ErrStopped + return 0, errors.ErrStopped } } } @@ -898,7 +898,7 @@ func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error { case <-ctx.Done(): return ctx.Err() case <-s.done: - return etcderrors.ErrStopped + return errors.ErrStopped } } @@ -923,7 +923,7 @@ func (s *EtcdServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb case pb.DowngradeRequest_CANCEL: return s.downgradeCancel(ctx) default: - return nil, etcderrors.ErrUnknownMethod + return nil, errors.ErrUnknownMethod } } @@ -937,7 +937,7 @@ func (s *EtcdServer) downgradeValidate(ctx context.Context, v string) (*pb.Downg cv := s.ClusterVersion() if cv == nil { - return nil, etcderrors.ErrClusterVersionUnavailable + return nil, errors.ErrClusterVersionUnavailable } resp.Version = version.Cluster(cv.String()) err = s.Version().DowngradeValidate(ctx, targetVersion) diff --git a/tests/functional/tester/stresser_key.go b/tests/functional/tester/stresser_key.go index aabf8cdf45c..8d505b84c42 100644 --- a/tests/functional/tester/stresser_key.go +++ b/tests/functional/tester/stresser_key.go @@ -26,7 +26,7 @@ import ( "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/raft/v3" - "go.etcd.io/etcd/server/v3/etcdserver/etcderrors" + "go.etcd.io/etcd/server/v3/etcdserver/errors" "go.etcd.io/etcd/tests/v3/functional/rpcpb" "go.uber.org/zap" @@ -153,13 +153,13 @@ func (s *keyStresser) isRetryableError(err error) bool { // as well. We want to keep stressing until the cluster elects a // new leader and start processing requests again. return true - case etcderrors.ErrTimeoutDueToLeaderFail.Error(), etcderrors.ErrTimeout.Error(): + case errors.ErrTimeoutDueToLeaderFail.Error(), errors.ErrTimeout.Error(): // This retries when request is triggered at the same time as // leader failure and follower nodes receive time out errors // from losing their leader. Followers should retry to connect // to the new leader. return true - case etcderrors.ErrStopped.Error(): + case errors.ErrStopped.Error(): // one of the etcd nodes stopped from failure injection return true case rpctypes.ErrNotCapable.Error(): From 0da0cf4795529238766d150b4c72211748dad048 Mon Sep 17 00:00:00 2001 From: Piotr Tabor Date: Thu, 28 Apr 2022 16:51:34 +0200 Subject: [PATCH 14/18] expose UberApplier as interface (not as implementation struct). 
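To make the intent concrete, a minimal sketch of the exported-interface pattern this commit applies (placeholder types stand in for pb.InternalRaftRequest, membership.ShouldApplyV3 and ApplyResult; an illustration, not the etcd sources):

package apply

// Stand-ins for the real pb/membership/apply types.
type InternalRaftRequest struct{}
type ShouldApplyV3 bool
type ApplyResult struct{}

// UberApplier is the exported surface; callers such as EtcdServer hold
// only this method set, never the struct below.
type UberApplier interface {
    Apply(r *InternalRaftRequest, shouldApplyV3 ShouldApplyV3) *ApplyResult
}

// uberApplier stays unexported, so its fields and helper methods remain
// free to change without breaking callers.
type uberApplier struct{}

// NewUberApplier is the single place that picks the concrete type.
func NewUberApplier() UberApplier {
    return &uberApplier{}
}

func (a *uberApplier) Apply(r *InternalRaftRequest, shouldApplyV3 ShouldApplyV3) *ApplyResult {
    return &ApplyResult{} // the real method runs the applier chain
}

Hiding the struct is also what allows RestoreAlarms to become the unexported restoreAlarms below: once consumers see only the interface, helper methods no longer need to be public.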
--- server/etcdserver/apply/uber_applier.go | 29 +++++++++++++++---------- server/etcdserver/server.go | 4 ++-- 2 files changed, 20 insertions(+), 13 deletions(-) diff --git a/server/etcdserver/apply/uber_applier.go b/server/etcdserver/apply/uber_applier.go index df94902d152..f4acccf66b9 100644 --- a/server/etcdserver/apply/uber_applier.go +++ b/server/etcdserver/apply/uber_applier.go @@ -30,7 +30,11 @@ import ( "go.uber.org/zap" ) -type UberApplier struct { +type UberApplier interface { + Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *ApplyResult +} + +type uberApplier struct { lg *zap.Logger alarmStore *v3alarm.AlarmStore @@ -56,17 +60,17 @@ func NewUberApplier( consistentIndex cindex.ConsistentIndexer, warningApplyDuration time.Duration, txnModeWriteWithSharedBuffer bool, - quotaBackendBytesCfg int64) *UberApplier { + quotaBackendBytesCfg int64) UberApplier { applyV3base_ := newApplierV3(lg, be, kv, alarmStore, authStore, lessor, cluster, raftStatus, snapshotServer, consistentIndex, txnModeWriteWithSharedBuffer, quotaBackendBytesCfg) - ua := &UberApplier{ + ua := &uberApplier{ lg: lg, alarmStore: alarmStore, warningApplyDuration: warningApplyDuration, applyV3: applyV3base_, applyV3base: applyV3base_, } - ua.RestoreAlarms() + ua.restoreAlarms() return ua } @@ -91,7 +95,7 @@ func newApplierV3( ) } -func (a *UberApplier) RestoreAlarms() { +func (a *uberApplier) restoreAlarms() { noSpaceAlarms := len(a.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 corruptAlarms := len(a.alarmStore.Get(pb.AlarmType_CORRUPT)) > 0 a.applyV3 = a.applyV3base @@ -103,14 +107,17 @@ func (a *UberApplier) RestoreAlarms() { } } -func (a *UberApplier) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *ApplyResult { - // We first execute chain of WrapApply across all objects (e.g. CorruptApplier -> CappedApplier -> Auth -> Quota -> Backend), - // than dispatch(), than individual methods wrappers CorruptApplier.Put(CappedApplier.Put(... BackendApplier.Put()))) +func (a *uberApplier) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *ApplyResult { + // We first execute chain of WrapApply() calls down the hierarchy: + // (i.e. CorruptApplier -> CappedApplier -> Auth -> Quota -> Backend), + // then dispatch() unpacks the request to a specific method (like Put), + // that gets executed down the hierarchy again: + // i.e. CorruptApplier.Put(CappedApplier.Put(...(BackendApplier.Put(...)))). 
return a.applyV3.WrapApply(context.TODO(), r, shouldApplyV3, a.dispatch) } // This function -func (a *UberApplier) dispatch(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *ApplyResult { +func (a *uberApplier) dispatch(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *ApplyResult { op := "unknown" ar := &ApplyResult{} defer func(start time.Time) { @@ -225,12 +232,12 @@ func (a *UberApplier) dispatch(ctx context.Context, r *pb.InternalRaftRequest, s return ar } -func (a *UberApplier) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { +func (a *uberApplier) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) { resp, err := a.applyV3.Alarm(ar) if ar.Action == pb.AlarmRequest_ACTIVATE || ar.Action == pb.AlarmRequest_DEACTIVATE { - a.RestoreAlarms() + a.restoreAlarms() } return resp, err } diff --git a/server/etcdserver/server.go b/server/etcdserver/server.go index be96f326b60..71e499be5d4 100644 --- a/server/etcdserver/server.go +++ b/server/etcdserver/server.go @@ -252,7 +252,7 @@ type EtcdServer struct { applyV2 ApplierV2 - uberApply *apply.UberApplier + uberApply apply.UberApplier applyWait wait.WaitTime @@ -1077,7 +1077,7 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, toApply *toApply) { s.uberApply = s.NewUberApplier() } -func (s *EtcdServer) NewUberApplier() *apply.UberApplier { +func (s *EtcdServer) NewUberApplier() apply.UberApplier { return apply.NewUberApplier(s.lg, s.be, s.KV(), s.alarmStore, s.authStore, s.lessor, s.cluster, s, s, s.consistIndex, s.Cfg.WarningApplyDuration, s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer, s.Cfg.QuotaBackendBytes) } From 42c6e08f22b7b93166c25f093b0cd0364ed3dbaa Mon Sep 17 00:00:00 2001 From: Piotr Tabor Date: Thu, 28 Apr 2022 16:56:23 +0200 Subject: [PATCH 15/18] Rename the txn, so as not to be the same as the package name. 
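The motivation, illustrated with a toy, compilable example built on the standard library's sort package rather than etcd code: a local variable that shares a package's name shadows the package qualifier for the rest of the scope, which is exactly the hazard of a variable named txn living alongside the etcdserver/txn package.

package main

import "sort"

func main() {
    xs := []int{3, 1, 2}
    sort.Ints(xs) // the sort package is still reachable here

    // From here on, the identifier names the slice, not the package;
    // a call such as sort.Slice(...) would no longer compile.
    sort := xs
    _ = sort
}

Renaming the locals to txnWrite/txnRead below removes the collision and, as a side effect, documents which transaction mode each helper expects.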
--- server/etcdserver/txn/txn.go | 82 ++++++++++++++++++------------------ 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/server/etcdserver/txn/txn.go b/server/etcdserver/txn/txn.go index b3b5f2c4e20..36782d34b61 100644 --- a/server/etcdserver/txn/txn.go +++ b/server/etcdserver/txn/txn.go @@ -29,7 +29,7 @@ import ( "go.uber.org/zap" ) -func Put(ctx context.Context, lg *zap.Logger, lessor lease.Lessor, kv mvcc.KV, txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) { +func Put(ctx context.Context, lg *zap.Logger, lessor lease.Lessor, kv mvcc.KV, txnWrite mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) { resp = &pb.PutResponse{} resp.Header = &pb.ResponseHeader{} trace = traceutil.Get(ctx) @@ -42,20 +42,20 @@ func Put(ctx context.Context, lg *zap.Logger, lessor lease.Lessor, kv mvcc.KV, t ) } val, leaseID := p.Value, lease.LeaseID(p.Lease) - if txn == nil { + if txnWrite == nil { if leaseID != lease.NoLease { if l := lessor.Lookup(leaseID); l == nil { return nil, nil, lease.ErrLeaseNotFound } } - txn = kv.Write(trace) - defer txn.End() + txnWrite = kv.Write(trace) + defer txnWrite.End() } var rr *mvcc.RangeResult if p.IgnoreValue || p.IgnoreLease || p.PrevKv { trace.StepWithFunction(func() { - rr, err = txn.Range(context.TODO(), p.Key, nil, mvcc.RangeOptions{}) + rr, err = txnWrite.Range(context.TODO(), p.Key, nil, mvcc.RangeOptions{}) }, "get previous kv pair") if err != nil { @@ -80,23 +80,23 @@ func Put(ctx context.Context, lg *zap.Logger, lessor lease.Lessor, kv mvcc.KV, t } } - resp.Header.Revision = txn.Put(p.Key, val, leaseID) + resp.Header.Revision = txnWrite.Put(p.Key, val, leaseID) trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision}) return resp, trace, nil } -func DeleteRange(kv mvcc.KV, txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { +func DeleteRange(kv mvcc.KV, txnWrite mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { resp := &pb.DeleteRangeResponse{} resp.Header = &pb.ResponseHeader{} end := mkGteRange(dr.RangeEnd) - if txn == nil { - txn = kv.Write(traceutil.TODO()) - defer txn.End() + if txnWrite == nil { + txnWrite = kv.Write(traceutil.TODO()) + defer txnWrite.End() } if dr.PrevKv { - rr, err := txn.Range(context.TODO(), dr.Key, end, mvcc.RangeOptions{}) + rr, err := txnWrite.Range(context.TODO(), dr.Key, end, mvcc.RangeOptions{}) if err != nil { return nil, err } @@ -108,19 +108,19 @@ func DeleteRange(kv mvcc.KV, txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb. 
} } - resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, end) + resp.Deleted, resp.Header.Revision = txnWrite.DeleteRange(dr.Key, end) return resp, nil } -func Range(ctx context.Context, lg *zap.Logger, kv mvcc.KV, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { +func Range(ctx context.Context, lg *zap.Logger, kv mvcc.KV, txnRead mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { trace := traceutil.Get(ctx) resp := &pb.RangeResponse{} resp.Header = &pb.ResponseHeader{} - if txn == nil { - txn = kv.Read(mvcc.ConcurrentReadTxMode, trace) - defer txn.End() + if txnRead == nil { + txnRead = kv.Read(mvcc.ConcurrentReadTxMode, trace) + defer txnRead.End() } limit := r.Limit @@ -141,7 +141,7 @@ func Range(ctx context.Context, lg *zap.Logger, kv mvcc.KV, txn mvcc.TxnRead, r Count: r.CountOnly, } - rr, err := txn.Range(ctx, r.Key, mkGteRange(r.RangeEnd), ro) + rr, err := txnRead.Range(ctx, r.Key, mkGteRange(r.RangeEnd), ro) if err != nil { return nil, err } @@ -227,50 +227,50 @@ func Txn(ctx context.Context, lg *zap.Logger, rt *pb.TxnRequest, txnModeWriteWit // When the transaction contains write operations, we use ReadTx instead of // ConcurrentReadTx to avoid extra overhead of copying buffer. - var txn mvcc.TxnWrite + var txnWrite mvcc.TxnWrite if isWrite && txnModeWriteWithSharedBuffer /*a.s.Cfg.ExperimentalTxnModeWriteWithSharedBuffer*/ { - txn = mvcc.NewReadOnlyTxnWrite(kv.Read(mvcc.SharedBufReadTxMode, trace)) + txnWrite = mvcc.NewReadOnlyTxnWrite(kv.Read(mvcc.SharedBufReadTxMode, trace)) } else { - txn = mvcc.NewReadOnlyTxnWrite(kv.Read(mvcc.ConcurrentReadTxMode, trace)) + txnWrite = mvcc.NewReadOnlyTxnWrite(kv.Read(mvcc.ConcurrentReadTxMode, trace)) } var txnPath []bool trace.StepWithFunction( func() { - txnPath = compareToPath(txn, rt) + txnPath = compareToPath(txnWrite, rt) }, "compare", ) if isWrite { trace.AddField(traceutil.Field{Key: "read_only", Value: false}) - if _, err := checkRequests(txn, rt, txnPath, + if _, err := checkRequests(txnWrite, rt, txnPath, func(rv mvcc.ReadView, ro *pb.RequestOp) error { return checkRequestPut(rv, lessor, ro) }); err != nil { - txn.End() + txnWrite.End() return nil, nil, err } } - if _, err := checkRequests(txn, rt, txnPath, checkRequestRange); err != nil { - txn.End() + if _, err := checkRequests(txnWrite, rt, txnPath, checkRequestRange); err != nil { + txnWrite.End() return nil, nil, err } trace.Step("check requests") txnResp, _ := newTxnResp(rt, txnPath) - // When executing mutable txn ops, etcd must hold the txn lock so + // When executing mutable txn ops, etcd must hold the txnWrite lock so // readers do not see any intermediate results. Since writes are // serialized on the raft loop, the revision in the read view will - // be the revision of the write txn. + // be the revision of the write transaction.
if isWrite { - txn.End() - txn = kv.Write(trace) + txnWrite.End() + txnWrite = kv.Write(trace) } - applyTxn(ctx, lg, kv, lessor, txn, rt, txnPath, txnResp) - rev := txn.Rev() - if len(txn.Changes()) != 0 { + applyTxn(ctx, lg, kv, lessor, txnWrite, rt, txnPath, txnResp) + rev := txnWrite.Rev() + if len(txnWrite.Changes()) != 0 { rev++ } - txn.End() + txnWrite.End() txnResp.Header.Revision = rev trace.AddField( @@ -311,7 +311,7 @@ func newTxnResp(rt *pb.TxnRequest, txnPath []bool) (txnResp *pb.TxnResponse, txn return txnResp, txnCount } -func applyTxn(ctx context.Context, lg *zap.Logger, kv mvcc.KV, lessor lease.Lessor, txn mvcc.TxnWrite, rt *pb.TxnRequest, txnPath []bool, tresp *pb.TxnResponse) (txns int) { +func applyTxn(ctx context.Context, lg *zap.Logger, kv mvcc.KV, lessor lease.Lessor, txnWrite mvcc.TxnWrite, rt *pb.TxnRequest, txnPath []bool, tresp *pb.TxnResponse) (txns int) { trace := traceutil.Get(ctx) reqs := rt.Success if !txnPath[0] { @@ -326,9 +326,9 @@ func applyTxn(ctx context.Context, lg *zap.Logger, kv mvcc.KV, lessor lease.Less traceutil.Field{Key: "req_type", Value: "range"}, traceutil.Field{Key: "range_begin", Value: string(tv.RequestRange.Key)}, traceutil.Field{Key: "range_end", Value: string(tv.RequestRange.RangeEnd)}) - resp, err := Range(ctx, lg, kv, txn, tv.RequestRange) + resp, err := Range(ctx, lg, kv, txnWrite, tv.RequestRange) if err != nil { lg.Panic("unexpected error during txn", zap.Error(err)) } respi.(*pb.ResponseOp_ResponseRange).ResponseRange = resp trace.StopSubTrace() case *pb.RequestOp_RequestPut: trace.StartSubTrace( traceutil.Field{Key: "req_type", Value: "put"}, traceutil.Field{Key: "key", Value: string(tv.RequestPut.Key)}, traceutil.Field{Key: "req_size", Value: tv.RequestPut.Size()}) - resp, _, err := Put(ctx, lg, lessor, kv, txn, tv.RequestPut) + resp, _, err := Put(ctx, lg, lessor, kv, txnWrite, tv.RequestPut) if err != nil { lg.Panic("unexpected error during txn", zap.Error(err)) } respi.(*pb.ResponseOp_ResponsePut).ResponsePut = resp trace.StopSubTrace() case *pb.RequestOp_RequestDeleteRange: - resp, err := DeleteRange(kv, txn, tv.RequestDeleteRange) + resp, err := DeleteRange(kv, txnWrite, tv.RequestDeleteRange) if err != nil { lg.Panic("unexpected error during txn", zap.Error(err)) } respi.(*pb.ResponseOp_ResponseDeleteRange).ResponseDeleteRange = resp case *pb.RequestOp_RequestTxn: resp := respi.(*pb.ResponseOp_ResponseTxn).ResponseTxn - applyTxns := applyTxn(ctx, lg, kv, lessor, txn, tv.RequestTxn, txnPath[1:], resp) + applyTxns := applyTxn(ctx, lg, kv, lessor, txnWrite, tv.RequestTxn, txnPath[1:], resp) txns += applyTxns + 1 txnPath = txnPath[applyTxns+1:] default: From 87b80f16caccf3a96b8008063eaf91d9d27e223b Mon Sep 17 00:00:00 2001 From: Piotr Tabor Date: Fri, 13 May 2022 13:42:09 +0200 Subject: [PATCH 16/18] Fixing missing comment on the dispatch() function. --- server/etcdserver/apply/uber_applier.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/server/etcdserver/apply/uber_applier.go b/server/etcdserver/apply/uber_applier.go index f4acccf66b9..25a2f180195 100644 --- a/server/etcdserver/apply/uber_applier.go +++ b/server/etcdserver/apply/uber_applier.go @@ -116,7 +116,8 @@ func (a *uberApplier) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.
 	return a.applyV3.WrapApply(context.TODO(), r, shouldApplyV3, a.dispatch)
 }
 
-// This function
+// dispatch translates the request (r) into the appropriate call (like Put) on
+// the underlying applyV3 object.
 func (a *uberApplier) dispatch(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *ApplyResult {
 	op := "unknown"
 	ar := &ApplyResult{}

From 85b18c9b3e2e63ee8fe7685e0c1b2c5e9abc47cc Mon Sep 17 00:00:00 2001
From: Piotr Tabor
Date: Fri, 13 May 2022 14:18:28 +0200
Subject: [PATCH 17/18] Rename WrapApply to Apply.

---
 server/etcdserver/apply/apply.go        | 11 ++++++-----
 server/etcdserver/apply/apply_auth.go   |  6 +++---
 server/etcdserver/apply/uber_applier.go | 12 ++++++------
 server/etcdserver/server.go             |  2 +-
 server/etcdserver/server_test.go        |  2 +-
 server/etcdserver/v3_server.go          |  4 ++--
 6 files changed, 19 insertions(+), 18 deletions(-)

diff --git a/server/etcdserver/apply/apply.go b/server/etcdserver/apply/apply.go
index 849e0ddc4e1..3014520b007 100644
--- a/server/etcdserver/apply/apply.go
+++ b/server/etcdserver/apply/apply.go
@@ -52,7 +52,7 @@ type RaftStatusGetter interface {
 	Term() uint64
 }
 
-type ApplyResult struct {
+type Result struct {
 	Resp proto.Message
 	Err  error
 	// Physc signals the physical effect of the request has completed in addition
@@ -62,12 +62,13 @@ type ApplyResult struct {
 	Trace *traceutil.Trace
 }
 
-type ApplyFunc func(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *ApplyResult
+type applyFunc func(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *Result
 
 // applierV3 is the interface for processing V3 raft messages
 type applierV3 interface {
-	WrapApply(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3, applyFunc ApplyFunc) *ApplyResult
-	//Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *ApplyResult
+	// Apply executes the generic portion of application logic for the current applier, but
+	// delegates the actual execution to the given applyFunc.
+	Apply(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3, applyFunc applyFunc) *Result
 
 	Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error)
 	Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error)
@@ -151,7 +152,7 @@ func newApplierV3Backend(
 		txnModeWriteWithSharedBuffer: txnModeWriteWithSharedBuffer}
 }
 
-func (a *applierV3backend) WrapApply(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3, applyFunc ApplyFunc) *ApplyResult {
+func (a *applierV3backend) Apply(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3, applyFunc applyFunc) *Result {
 	return applyFunc(ctx, r, shouldApplyV3)
 }

diff --git a/server/etcdserver/apply/apply_auth.go b/server/etcdserver/apply/apply_auth.go
index 947d82840e9..fc94e88b39e 100644
--- a/server/etcdserver/apply/apply_auth.go
+++ b/server/etcdserver/apply/apply_auth.go
@@ -43,7 +43,7 @@ func newAuthApplierV3(as auth.AuthStore, base applierV3, lessor lease.Lessor) *a
 	return &authApplierV3{applierV3: base, as: as, lessor: lessor}
 }
 
-func (aa *authApplierV3) WrapApply(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3, applyFunc ApplyFunc) *ApplyResult {
+func (aa *authApplierV3) Apply(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3, applyFunc applyFunc) *Result {
 	aa.mu.Lock()
 	defer aa.mu.Unlock()
 	if r.Header != nil {
@@ -56,10 +56,10 @@ func (aa *authApplierV3) WrapApply(ctx context.Context, r *pb.InternalRaftReques
 		if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil {
 			aa.authInfo.Username = ""
 			aa.authInfo.Revision = 0
-			return &ApplyResult{Err: err}
+			return &Result{Err: err}
 		}
 	}
-	ret := aa.applierV3.WrapApply(ctx, r, shouldApplyV3, applyFunc)
+	ret := aa.applierV3.Apply(ctx, r, shouldApplyV3, applyFunc)
 	aa.authInfo.Username = ""
 	aa.authInfo.Revision = 0
 	return ret

diff --git a/server/etcdserver/apply/uber_applier.go b/server/etcdserver/apply/uber_applier.go
index 25a2f180195..50f8ba4b15e 100644
--- a/server/etcdserver/apply/uber_applier.go
+++ b/server/etcdserver/apply/uber_applier.go
@@ -31,7 +31,7 @@ import (
 )
 
 type UberApplier interface {
-	Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *ApplyResult
+	Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *Result
 }
 
 type uberApplier struct {
@@ -107,20 +107,20 @@ func (a *uberApplier) restoreAlarms() {
 	}
 }
 
-func (a *uberApplier) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *ApplyResult {
-	// We first execute chain of WrapApply() calls down the hierarchy:
+func (a *uberApplier) Apply(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *Result {
+	// We first execute the chain of Apply() calls down the hierarchy:
 	// (i.e. CorruptApplier -> CappedApplier -> Auth -> Quota -> Backend),
 	// then dispatch() unpacks the request to a specific method (like Put),
 	// that gets executed down the hierarchy again:
 	// i.e. CorruptApplier.Put(CappedApplier.Put(...(BackendApplier.Put(...)))).
-	return a.applyV3.WrapApply(context.TODO(), r, shouldApplyV3, a.dispatch)
+	return a.applyV3.Apply(context.TODO(), r, shouldApplyV3, a.dispatch)
 }
 
 // dispatch translates the request (r) into the appropriate call (like Put) on
 // the underlying applyV3 object.
-func (a *uberApplier) dispatch(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *ApplyResult {
+func (a *uberApplier) dispatch(ctx context.Context, r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *Result {
 	op := "unknown"
-	ar := &ApplyResult{}
+	ar := &Result{}
 	defer func(start time.Time) {
 		success := ar.Err == nil || ar.Err == mvcc.ErrCompacted
 		txn.ApplySecObserve(v3Version, op, success, time.Since(start))

diff --git a/server/etcdserver/server.go b/server/etcdserver/server.go
index 71e499be5d4..a64939e343d 100644
--- a/server/etcdserver/server.go
+++ b/server/etcdserver/server.go
@@ -1829,7 +1829,7 @@ func (s *EtcdServer) apply(
 func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
 	shouldApplyV3 := membership.ApplyV2storeOnly
 	applyV3Performed := false
-	var ar *apply.ApplyResult
+	var ar *apply.Result
 	index := s.consistIndex.ConsistentIndex()
 	if e.Index > index {
 		// set the consistent index of current executing entry

diff --git a/server/etcdserver/server_test.go b/server/etcdserver/server_test.go
index cb86eb4a8b7..f60f73f4d36 100644
--- a/server/etcdserver/server_test.go
+++ b/server/etcdserver/server_test.go
@@ -1479,7 +1479,7 @@ func TestPublishV3(t *testing.T) {
 	n := newNodeRecorder()
 	ch := make(chan interface{}, 1)
 	// simulate that request has gone through consensus
-	ch <- &apply2.ApplyResult{}
+	ch <- &apply2.Result{}
 	w := wait.NewWithResponse(ch)
 	ctx, cancel := context.WithCancel(context.Background())
 	lg := zaptest.NewLogger(t)

diff --git a/server/etcdserver/v3_server.go b/server/etcdserver/v3_server.go
index e5c26727968..63a190e6ed6 100644
--- a/server/etcdserver/v3_server.go
+++ b/server/etcdserver/v3_server.go
@@ -656,7 +656,7 @@ func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) e
 	return nil
 }
 
-func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*apply2.ApplyResult, error) {
+func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*apply2.Result, error) {
 	ai := s.getAppliedIndex()
 	ci := s.getCommittedIndex()
 	if ci > ai+maxGapBetweenApplyAndCommitIndex {
@@ -709,7 +709,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In
 
 	select {
 	case x := <-ch:
-		return x.(*apply2.ApplyResult), nil
+		return x.(*apply2.Result), nil
 	case <-cctx.Done():
 		proposalsFailed.Inc()
 		s.w.Trigger(id, nil) // GC wait

From 41ff2370e98740e3ef0f788b3fea5379258c7090 Mon Sep 17 00:00:00 2001
From: Piotr Tabor
Date: Fri, 20 May 2022 14:42:39 +0200
Subject: [PATCH 18/18] Remove unused code and apply code-quality suggestions.
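The clean-ups below rely on two common Go idioms, sketched here as a
minimal standalone program under stated assumptions: the names lookup,
checkKeys, and reject are hypothetical and are not etcd identifiers. First,
a local variable named lease would shadow the imported lease package, so
the local is renamed to l; second, parameters that are deliberately ignored
are replaced with the blank identifier.

package main

import "fmt"

// lookup is a hypothetical stand-in for a call like lessor.Lookup: it may
// return nil when nothing is found.
func lookup(id int) []string {
	if id == 0 {
		return nil
	}
	return []string{"key-a", "key-b"}
}

// checkKeys shows the shadowing fix: naming the local result l (rather
// than lease) keeps an identically named package usable in the same file.
func checkKeys(id int) {
	l := lookup(id)
	if l != nil {
		for _, key := range l {
			fmt.Println("checking", key)
		}
	}
}

// reject shows the unused-parameter idiom from the corrupt.go stubs: the
// blank identifier documents that the arguments are intentionally ignored.
func reject(_ string, _ int) error {
	return fmt.Errorf("corrupt")
}

func main() {
	checkKeys(1)
	fmt.Println(reject("put", 42))
}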
---
 server/etcdserver/apply/apply.go      | 10 +++---
 server/etcdserver/apply/apply_auth.go | 49 ++-------------------------
 server/etcdserver/apply/corrupt.go    | 14 ++++----
 3 files changed, 15 insertions(+), 58 deletions(-)

diff --git a/server/etcdserver/apply/apply.go b/server/etcdserver/apply/apply.go
index 3014520b007..9fe77e91f4c 100644
--- a/server/etcdserver/apply/apply.go
+++ b/server/etcdserver/apply/apply.go
@@ -28,7 +28,7 @@ import (
 	"go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm"
 	"go.etcd.io/etcd/server/v3/etcdserver/cindex"
 	"go.etcd.io/etcd/server/v3/etcdserver/errors"
-	mvcc_txn "go.etcd.io/etcd/server/v3/etcdserver/txn"
+	mvcctxn "go.etcd.io/etcd/server/v3/etcdserver/txn"
 	"go.etcd.io/etcd/server/v3/etcdserver/version"
 	"go.etcd.io/etcd/server/v3/lease"
 	serverstorage "go.etcd.io/etcd/server/v3/storage"
@@ -157,19 +157,19 @@ func (a *applierV3backend) Apply(ctx context.Context, r *pb.InternalRaftRequest,
 }
 
 func (a *applierV3backend) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) {
-	return mvcc_txn.Put(ctx, a.lg, a.lessor, a.kv, txn, p)
+	return mvcctxn.Put(ctx, a.lg, a.lessor, a.kv, txn, p)
 }
 
 func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
-	return mvcc_txn.DeleteRange(a.kv, txn, dr)
+	return mvcctxn.DeleteRange(a.kv, txn, dr)
 }
 
 func (a *applierV3backend) Range(ctx context.Context, txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
-	return mvcc_txn.Range(ctx, a.lg, a.kv, txn, r)
+	return mvcctxn.Range(ctx, a.lg, a.kv, txn, r)
 }
 
 func (a *applierV3backend) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
-	return mvcc_txn.Txn(ctx, a.lg, rt, a.txnModeWriteWithSharedBuffer, a.kv, a.lessor)
+	return mvcctxn.Txn(ctx, a.lg, rt, a.txnModeWriteWithSharedBuffer, a.kv, a.lessor)
 }
 
 func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) {

diff --git a/server/etcdserver/apply/apply_auth.go b/server/etcdserver/apply/apply_auth.go
index fc94e88b39e..61f9f8892d2 100644
--- a/server/etcdserver/apply/apply_auth.go
+++ b/server/etcdserver/apply/apply_auth.go
@@ -108,49 +108,6 @@ func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest
 	return aa.applierV3.DeleteRange(txn, r)
 }
 
-func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error {
-	for _, requ := range reqs {
-		switch tv := requ.Request.(type) {
-		case *pb.RequestOp_RequestRange:
-			if tv.RequestRange == nil {
-				continue
-			}
-
-			if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil {
-				return err
-			}
-
-		case *pb.RequestOp_RequestPut:
-			if tv.RequestPut == nil {
-				continue
-			}
-
-			if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil {
-				return err
-			}
-
-		case *pb.RequestOp_RequestDeleteRange:
-			if tv.RequestDeleteRange == nil {
-				continue
-			}
-
-			if tv.RequestDeleteRange.PrevKv {
-				err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
-				if err != nil {
-					return err
-				}
-			}
-
-			err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
 func (aa *authApplierV3) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
 	if err := txn.CheckTxnAuth(aa.as, &aa.authInfo, rt); err != nil {
 		return nil, nil, err
@@ -166,9 +123,9 @@ func (aa *authApplierV3) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevoke
 }
 
 func (aa *authApplierV3) checkLeasePuts(leaseID lease.LeaseID) error {
-	lease := aa.lessor.Lookup(leaseID)
-	if lease != nil {
-		for _, key := range lease.Keys() {
+	l := aa.lessor.Lookup(leaseID)
+	if l != nil {
+		for _, key := range l.Keys() {
 			if err := aa.as.IsPutPermitted(&aa.authInfo, []byte(key)); err != nil {
 				return err
 			}

diff --git a/server/etcdserver/apply/corrupt.go b/server/etcdserver/apply/corrupt.go
index d8353514e09..040f294aeba 100644
--- a/server/etcdserver/apply/corrupt.go
+++ b/server/etcdserver/apply/corrupt.go
@@ -29,30 +29,30 @@ type applierV3Corrupt struct {
 
 func newApplierV3Corrupt(a applierV3) *applierV3Corrupt {
 	return &applierV3Corrupt{a}
 }
 
-func (a *applierV3Corrupt) Put(ctx context.Context, txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
+func (a *applierV3Corrupt) Put(_ context.Context, _ mvcc.TxnWrite, _ *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
 	return nil, nil, errors.ErrCorrupt
 }
 
-func (a *applierV3Corrupt) Range(ctx context.Context, txn mvcc.TxnRead, p *pb.RangeRequest) (*pb.RangeResponse, error) {
+func (a *applierV3Corrupt) Range(_ context.Context, _ mvcc.TxnRead, _ *pb.RangeRequest) (*pb.RangeResponse, error) {
 	return nil, errors.ErrCorrupt
 }
 
-func (a *applierV3Corrupt) DeleteRange(txn mvcc.TxnWrite, p *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+func (a *applierV3Corrupt) DeleteRange(_ mvcc.TxnWrite, _ *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
 	return nil, errors.ErrCorrupt
 }
 
-func (a *applierV3Corrupt) Txn(ctx context.Context, rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
+func (a *applierV3Corrupt) Txn(_ context.Context, _ *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
 	return nil, nil, errors.ErrCorrupt
 }
 
-func (a *applierV3Corrupt) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) {
+func (a *applierV3Corrupt) Compaction(_ *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) {
 	return nil, nil, nil, errors.ErrCorrupt
 }
 
-func (a *applierV3Corrupt) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+func (a *applierV3Corrupt) LeaseGrant(_ *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
 	return nil, errors.ErrCorrupt
 }
 
-func (a *applierV3Corrupt) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+func (a *applierV3Corrupt) LeaseRevoke(_ *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
 	return nil, errors.ErrCorrupt
 }
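Taken together, the series leaves the apply path layered the way the
uber_applier comment describes: a generic Apply pass runs down a chain of
wrappers, then dispatch() unpacks the request and re-enters the same chain
through a concrete method such as Put. Below is a minimal sketch of that
two-phase decorator pattern; the types applier, backend, corruptGuard, and
uber are hypothetical illustrations, not etcd's real API.

package main

import (
	"context"
	"errors"
	"fmt"
)

var errCorrupt = errors.New("corrupt alarm raised")

type result struct {
	resp string
	err  error
}

type applyFn func(ctx context.Context, req string) *result

// applier mirrors the two-phase shape of applierV3: a generic Apply that
// receives the dispatch function, plus concrete operations such as Put.
type applier interface {
	Apply(ctx context.Context, req string, f applyFn) *result
	Put(ctx context.Context, key string) *result
}

// backend is the innermost applier: Apply simply invokes the dispatch
// function, and Put does the real work.
type backend struct{}

func (b *backend) Apply(ctx context.Context, req string, f applyFn) *result {
	return f(ctx, req)
}

func (b *backend) Put(_ context.Context, key string) *result {
	return &result{resp: "put " + key}
}

// corruptGuard wraps another applier and fails concrete operations while
// the corrupt flag is set, the way applierV3Corrupt short-circuits requests.
type corruptGuard struct {
	applier
	corrupt bool
}

func (c *corruptGuard) Put(ctx context.Context, key string) *result {
	if c.corrupt {
		return &result{err: errCorrupt}
	}
	return c.applier.Put(ctx, key)
}

// uber owns the head of the chain; dispatch translates the request into the
// matching concrete call, which then runs down the chain a second time.
type uber struct {
	chain applier
}

func (u *uber) dispatch(ctx context.Context, req string) *result {
	switch req {
	case "put":
		return u.chain.Put(ctx, "foo")
	default:
		return &result{err: fmt.Errorf("unknown request %q", req)}
	}
}

func (u *uber) Apply(ctx context.Context, req string) *result {
	return u.chain.Apply(ctx, req, u.dispatch)
}

func main() {
	u := &uber{chain: &corruptGuard{applier: &backend{}}}
	fmt.Println(u.Apply(context.Background(), "put")) // &{put foo <nil>}
}

Embedding the applier interface in corruptGuard lets the wrapper override
only the methods it cares about and inherit the rest; this appears to be
the same trick the real wrappers use (newApplierV3Corrupt stores the
wrapped applierV3 as an embedded field) to avoid re-implementing every
method of the interface.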