[FAB-5284] Move kafka orderer to new message flow
This patch accomplishes the following tasks:

- A new message type `ConfigMsg` is added to `msgprocessor`, so that
  `ClassifyMsg` now returns one of three possible results: `ConfigUpdateMsg`,
  `ConfigMsg`, or `NormalMsg`. This is for backwards compatibility. (A
  sketch of the resulting three-way dispatch follows this list.)

- Skip the second validation check in the Kafka orderer unless the
  config sequence has advanced since the message was first validated.
  This adapts to the new message flow, and benchmarks suggest a ~2x
  performance improvement. It is also backwards compatible with the
  v1.0.x orderer. Re-submission of re-validated messages will be
  addressed by follow-up patches; see FAB-5720 for more information.

- `chain_test.go` is restructured to be more logically organized, which
  is the main reason this patch looks sizeable. No logic has changed
  other than several newly added tests.
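
A minimal sketch of the resulting three-way dispatch (the `handleMessage`
helper and its wiring are illustrative only, not part of this patch; the
types and methods are the ones the patch touches):

```go
package kafka

import (
	"fmt"

	"github.com/hyperledger/fabric/orderer/common/msgprocessor"
	"github.com/hyperledger/fabric/orderer/consensus"
	cb "github.com/hyperledger/fabric/protos/common"
	"github.com/hyperledger/fabric/protos/utils"
)

// handleMessage routes an envelope according to its classification.
func handleMessage(support consensus.ConsenterSupport, env *cb.Envelope) error {
	chdr, err := utils.ChannelHeader(env)
	if err != nil {
		return err
	}
	switch support.ClassifyMsg(chdr) {
	case msgprocessor.NormalMsg:
		// Regular transactions: validate, then cut into ordinary blocks.
		_, err := support.ProcessNormalMsg(env)
		return err
	case msgprocessor.ConfigMsg:
		// ORDERER_TRANSACTION or CONFIG: validate as a config message,
		// later committed in its own block.
		_, _, err := support.ProcessConfigMsg(env)
		return err
	case msgprocessor.ConfigUpdateMsg:
		// CONFIG_UPDATE is transformed at Broadcast ingress and is not
		// expected to reach the consenter.
		return fmt.Errorf("unexpected ConfigUpdateMsg")
	default:
		return fmt.Errorf("unsupported classification")
	}
}
```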

Change-Id: I5b231dcecdfefd17d4b0f0679a2cc7796db9242b
Signed-off-by: Jay Guo <guojiannan1101@gmail.com>
guoger committed Sep 13, 2017
1 parent 368b92a commit 0ab835f
Showing 7 changed files with 1,698 additions and 970 deletions.
6 changes: 5 additions & 1 deletion orderer/common/msgprocessor/msgprocessor.go
@@ -39,9 +39,13 @@ const (
// Messages of this type should be processed by ProcessNormalMsg.
NormalMsg Classification = iota

// ConfigUpdateMsg is the class of configuration related messages.
// ConfigUpdateMsg indicates messages of type CONFIG_UPDATE.
// Messages of this type should be processed by ProcessConfigUpdateMsg.
ConfigUpdateMsg

// ConfigMsg indicates message of type ORDERER_TRANSACTION or CONFIG.
// Messages of this type should be processed by ProcessConfigMsg
ConfigMsg
)

// Processor provides the methods necessary to classify and process any message which
6 changes: 4 additions & 2 deletions orderer/common/msgprocessor/standardchannel.go
@@ -63,9 +63,11 @@ func (s *StandardChannel) ClassifyMsg(chdr *cb.ChannelHeader) Classification {
case int32(cb.HeaderType_CONFIG_UPDATE):
return ConfigUpdateMsg
case int32(cb.HeaderType_ORDERER_TRANSACTION):
return ConfigUpdateMsg
// In order to maintain backwards compatibility, we must classify these messages
return ConfigMsg
case int32(cb.HeaderType_CONFIG):
return ConfigUpdateMsg
// In order to maintain backwards compatibility, we must classify these messages
return ConfigMsg
default:
return NormalMsg
}
4 changes: 2 additions & 2 deletions orderer/common/msgprocessor/standardchannel_test.go
@@ -47,11 +47,11 @@ func TestClassifyMsg(t *testing.T) {
})
t.Run("OrdererTx", func(t *testing.T) {
class := (&StandardChannel{}).ClassifyMsg(&cb.ChannelHeader{Type: int32(cb.HeaderType_ORDERER_TRANSACTION)})
assert.Equal(t, class, ConfigUpdateMsg)
assert.Equal(t, class, ConfigMsg)
})
t.Run("ConfigTx", func(t *testing.T) {
class := (&StandardChannel{}).ClassifyMsg(&cb.ChannelHeader{Type: int32(cb.HeaderType_CONFIG)})
assert.Equal(t, class, ConfigUpdateMsg)
assert.Equal(t, class, ConfigMsg)
})
t.Run("EndorserTx", func(t *testing.T) {
class := (&StandardChannel{}).ClassifyMsg(&cb.ChannelHeader{Type: int32(cb.HeaderType_ENDORSER_TRANSACTION)})
4 changes: 3 additions & 1 deletion orderer/common/multichannel/util_test.go
@@ -70,7 +70,7 @@ func (mch *mockChain) Start() {

class := mch.support.ClassifyMsg(chdr)
switch class {
case msgprocessor.ConfigUpdateMsg:
case msgprocessor.ConfigMsg:
batch := mch.support.BlockCutter().Cut()
if batch != nil {
block := mch.support.CreateNextBlock(batch)
@@ -90,6 +90,8 @@ func (mch *mockChain) Start() {
block := mch.support.CreateNextBlock(batch)
mch.support.WriteBlock(block, nil)
}
case msgprocessor.ConfigUpdateMsg:
logger.Panicf("Not expecting msg class ConfigUpdateMsg here")
default:
logger.Panicf("Unsupported msg classification: %v", class)
}
187 changes: 132 additions & 55 deletions orderer/consensus/kafka/chain.go
@@ -122,37 +122,46 @@ func (chain *chainImpl) Halt() {

// Implements the consensus.Chain interface. Called by Broadcast().
func (chain *chainImpl) Order(env *cb.Envelope, configSeq uint64) error {
if !chain.enqueue(env) {
return fmt.Errorf("Could not enqueue")
marshaledEnv, err := utils.Marshal(env)
if err != nil {
return fmt.Errorf("cannot enqueue, unable to marshal envelope because = %s", err)
}
if !chain.enqueue(newNormalMessage(marshaledEnv, configSeq)) {
return fmt.Errorf("cannot enqueue")
}
return nil
}

// Implements the consensus.Chain interface. Called by Broadcast().
func (chain *chainImpl) Configure(config *cb.Envelope, configSeq uint64) error {
return chain.Order(config, configSeq)
marshaledConfig, err := utils.Marshal(config)
if err != nil {
return fmt.Errorf("cannot enqueue, unable to marshal config because = %s", err)
}
if !chain.enqueue(newConfigMessage(marshaledConfig, configSeq)) {
return fmt.Errorf("cannot enqueue")
}
return nil
}

// enqueue accepts a message and returns true on acceptance, or false otherwise.
func (chain *chainImpl) enqueue(env *cb.Envelope) bool {
func (chain *chainImpl) enqueue(kafkaMsg *ab.KafkaMessage) bool {
logger.Debugf("[channel: %s] Enqueueing envelope...", chain.support.ChainID())
select {
case <-chain.startChan: // The Start phase has completed
select {
case <-chain.haltChan: // The chain has been halted, stop here
logger.Warningf("[channel: %s] Will not enqueue, consenter for this channel has been halted", chain.support.ChainID())
logger.Warningf("[channel: %s] consenter for this channel has been halted", chain.support.ChainID())
return false
default: // The post path
marshaledEnv, err := utils.Marshal(env)
payload, err := utils.Marshal(kafkaMsg)
if err != nil {
logger.Errorf("[channel: %s] cannot enqueue, unable to marshal envelope = %s", chain.support.ChainID(), err)
logger.Errorf("[channel: %s] unable to marshal Kafka message because = %s", chain.support.ChainID(), err)
return false
}
// We're good to go
payload := utils.MarshalOrPanic(newRegularMessage(marshaledEnv))
message := newProducerMessage(chain.channel, payload)
if _, _, err := chain.producer.SendMessage(message); err != nil {
logger.Errorf("[channel: %s] cannot enqueue envelope = %s", chain.support.ChainID(), err)
if _, _, err = chain.producer.SendMessage(message); err != nil {
logger.Errorf("[channel: %s] cannot enqueue envelope because = %s", chain.support.ChainID(), err)
return false
}
logger.Debugf("[channel: %s] Envelope enqueued successfully", chain.support.ChainID())
@@ -353,11 +362,25 @@ func newConnectMessage() *ab.KafkaMessage {
}
}

func newRegularMessage(payload []byte) *ab.KafkaMessage {
func newNormalMessage(payload []byte, configSeq uint64) *ab.KafkaMessage {
return &ab.KafkaMessage{
Type: &ab.KafkaMessage_Regular{
Regular: &ab.KafkaMessageRegular{
Payload: payload,
ConfigSeq: configSeq,
Class: ab.KafkaMessageRegular_NORMAL,
},
},
}
}

func newConfigMessage(config []byte, configSeq uint64) *ab.KafkaMessage {
return &ab.KafkaMessage{
Type: &ab.KafkaMessage_Regular{
Regular: &ab.KafkaMessageRegular{
Payload: payload,
Payload: config,
ConfigSeq: configSeq,
Class: ab.KafkaMessageRegular_CONFIG,
},
},
}
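
Aside (not part of the diff): the two constructors above stamp each regular
Kafka message with the config sequence it was validated against, plus a class
tag. A minimal round-trip sketch of that wire format, reusing this file's
`ab`, `proto`, and `utils` imports; `roundTrip` itself is hypothetical:

```go
// roundTrip is a hypothetical helper showing the new wire format.
func roundTrip(marshaledEnv []byte, configSeq uint64) (*ab.KafkaMessageRegular, error) {
	// Producer side: wrap the envelope exactly as Order() now does.
	payload := utils.MarshalOrPanic(newNormalMessage(marshaledEnv, configSeq))

	// Consumer side: recover the class and the config sequence so the
	// processing loop can decide whether re-validation is needed.
	msg := &ab.KafkaMessage{}
	if err := proto.Unmarshal(payload, msg); err != nil {
		return nil, err
	}
	reg := msg.GetRegular()
	// Here reg.Class == ab.KafkaMessageRegular_NORMAL,
	// reg.ConfigSeq == configSeq, and reg.Payload == marshaledEnv.
	return reg, nil
}
```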
@@ -387,51 +410,13 @@ func processConnect(channelName string) error {
}

func processRegular(regularMessage *ab.KafkaMessageRegular, support consensus.ConsenterSupport, timer *<-chan time.Time, receivedOffset int64, lastCutBlockNumber *uint64) error {
env := new(cb.Envelope)
if err := proto.Unmarshal(regularMessage.Payload, env); err != nil {
// This shouldn't happen, it should be filtered at ingress
return fmt.Errorf("unmarshal/%s", err)
}

chdr, err := utils.ChannelHeader(env)
if err != nil {
logger.Panicf("If a message has arrived to this point, it should already have had its header inspected once")
}

class := support.ClassifyMsg(chdr)
switch class {
case msgprocessor.ConfigUpdateMsg:
_, err := support.ProcessNormalMsg(env)
if err != nil {
logger.Warningf("[channel: %s] Discarding bad config message: %s", support.ChainID(), err)
break
}

batch := support.BlockCutter().Cut()
if batch != nil {
block := support.CreateNextBlock(batch)
encodedLastOffsetPersisted := utils.MarshalOrPanic(&ab.KafkaMetadata{LastOffsetPersisted: receivedOffset - 1})
support.WriteBlock(block, encodedLastOffsetPersisted)
*lastCutBlockNumber++
}
block := support.CreateNextBlock([]*cb.Envelope{env})
encodedLastOffsetPersisted := utils.MarshalOrPanic(&ab.KafkaMetadata{LastOffsetPersisted: receivedOffset})
support.WriteConfigBlock(block, encodedLastOffsetPersisted)
*lastCutBlockNumber++
*timer = nil
case msgprocessor.NormalMsg:
_, err := support.ProcessNormalMsg(env)
if err != nil {
logger.Warningf("Discarding bad normal message: %s", err)
break
}

batches, pending := support.BlockCutter().Ordered(env)
commitNormalMsg := func(message *cb.Envelope) {
batches, pending := support.BlockCutter().Ordered(message)
logger.Debugf("[channel: %s] Ordering results: items in batch = %d, pending = %v", support.ChainID(), len(batches), pending)
if len(batches) == 0 && *timer == nil {
*timer = time.After(support.SharedConfig().BatchTimeout())
logger.Debugf("[channel: %s] Just began %s batch timer", support.ChainID(), support.SharedConfig().BatchTimeout().String())
return nil
return
}

offset := receivedOffset
Expand All @@ -453,9 +438,101 @@ func processRegular(regularMessage *ab.KafkaMessageRegular, support consensus.Co
if len(batches) > 0 {
*timer = nil
}
}

commitConfigMsg := func(message *cb.Envelope) {
logger.Debugf("[channel: %s] Received config message", support.ChainID())
batch := support.BlockCutter().Cut()

if batch != nil {
logger.Debugf("[channel: %s] Cut pending messages into block", support.ChainID())
block := support.CreateNextBlock(batch)
encodedLastOffsetPersisted := utils.MarshalOrPanic(&ab.KafkaMetadata{LastOffsetPersisted: receivedOffset - 1})
support.WriteBlock(block, encodedLastOffsetPersisted)
*lastCutBlockNumber++
}

logger.Debugf("[channel: %s] Creating isolated block for config message", support.ChainID())
block := support.CreateNextBlock([]*cb.Envelope{message})
encodedLastOffsetPersisted := utils.MarshalOrPanic(&ab.KafkaMetadata{LastOffsetPersisted: receivedOffset})
support.WriteConfigBlock(block, encodedLastOffsetPersisted)
*lastCutBlockNumber++
*timer = nil
}

seq := support.Sequence()

env := &cb.Envelope{}
if err := proto.Unmarshal(regularMessage.Payload, env); err != nil {
// This shouldn't happen, it should be filtered at ingress
return fmt.Errorf("failed to unmarshal payload of regular message because = %s", err)
}

logger.Debugf("[channel: %s] Processing regular Kafka message of type %s", support.ChainID(), regularMessage.Class.String())

switch regularMessage.Class {
case ab.KafkaMessageRegular_UNKNOWN:
// Received regular message of type UNKNOWN, indicating it's from v1.0.x orderer
chdr, err := utils.ChannelHeader(env)
if err != nil {
return fmt.Errorf("discarding bad config message because of channel header unmarshalling error = %s", err)
}

class := support.ClassifyMsg(chdr)
switch class {
case msgprocessor.ConfigMsg:
if _, _, err := support.ProcessConfigMsg(env); err != nil {
return fmt.Errorf("discarding bad config message because = %s", err)
}

commitConfigMsg(env)

case msgprocessor.NormalMsg:
if _, err := support.ProcessNormalMsg(env); err != nil {
return fmt.Errorf("discarding bad normal message because = %s", err)
}

commitNormalMsg(env)

case msgprocessor.ConfigUpdateMsg:
return fmt.Errorf("not expecting message of type ConfigUpdate")

default:
logger.Panicf("[channel: %s] Unsupported message classification: %v", support.ChainID(), class)
}

case ab.KafkaMessageRegular_NORMAL:
if regularMessage.ConfigSeq < seq {
logger.Debugf("[channel: %s] Config sequence has advanced since this normal message being validated, re-validating", support.ChainID())
if _, err := support.ProcessNormalMsg(env); err != nil {
return fmt.Errorf("discarding bad normal message because = %s", err)
}

// TODO re-submit stale normal message via `Order`, instead of discarding it immediately. Fix this as part of FAB-5720
return fmt.Errorf("discarding stale normal message because config seq has advanced")
}

commitNormalMsg(env)

case ab.KafkaMessageRegular_CONFIG:
if regularMessage.ConfigSeq < seq {
logger.Debugf("[channel: %s] Config sequence has advanced since this config message being validated, re-validating", support.ChainID())
_, _, err := support.ProcessConfigMsg(env)
if err != nil {
return fmt.Errorf("rejecting config message because = %s", err)
}

// TODO re-submit resulting config message via `Configure`, instead of discarding it. Fix this as part of FAB-5720
// Configure(configUpdateEnv, newConfigEnv, seq)
return fmt.Errorf("discarding stale config message because config seq has advanced")
}

commitConfigMsg(env)

default:
logger.Panicf("[channel: %s] Unsupported message classification: %v", support.ChainID(), class)
return fmt.Errorf("unsupported regular kafka message type: %v", regularMessage.Class.String())
}

return nil
}
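
The ~2x improvement claimed in the commit message comes from the gate used in
the NORMAL and CONFIG cases above: a message is re-validated only when the
channel's config sequence has advanced past the one it was validated against,
and such stale messages are dropped for now (re-submission is deferred to
FAB-5720). Reduced to a sketch, with the hypothetical helper name
`needsRevalidation`:

```go
// needsRevalidation reports whether a regular message, validated at
// Broadcast time against msg.ConfigSeq, must be validated again now
// because the channel's config sequence has advanced since then.
func needsRevalidation(msg *ab.KafkaMessageRegular, support consensus.ConsenterSupport) bool {
	return msg.ConfigSeq < support.Sequence()
}
```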
