Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

test: Add epochs to MBT #1676

Merged
merged 47 commits into from
Mar 8, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
47 commits
Select commit Hold shift + click to select a range
01daa3c
cleanup ./changelog entries
mpoke Jan 11, 2024
1ab7c20
rebase
insumity Feb 23, 2024
efe8cb1
fix!: Validation of SlashAcks fails due to marshaling to Bech32 (bac…
mergify[bot] Jan 19, 2024
a5c1d4e
docs: update changelog for v4.0.0 (#1578)
mpoke Jan 22, 2024
c03587c
docs: prepare for v4.0.0 (#1581)
mpoke Jan 22, 2024
6c39a2f
added proto declaration
insumity Feb 21, 2024
ddae681
temp commit
insumity Feb 21, 2024
c9b1b3e
temp commit
insumity Feb 21, 2024
e0861e2
more changes
insumity Feb 21, 2024
d6cd207
first commit
insumity Feb 22, 2024
d144c39
add param and fix tests
insumity Feb 23, 2024
44d13cd
reduce epoch size for e2e
insumity Feb 23, 2024
076ec8b
clean up
insumity Feb 23, 2024
a56b6e0
mbt fix
insumity Feb 23, 2024
36afb40
fix diff bug
insumity Feb 26, 2024
481fd88
cleaning up
insumity Feb 26, 2024
35385dd
cleaning up
insumity Feb 26, 2024
27290f6
cleaning up
insumity Feb 26, 2024
c707c54
cleaning up
insumity Feb 26, 2024
cf8bdb8
cleaning up
insumity Feb 27, 2024
ead0873
cleaning up
insumity Feb 27, 2024
88081cd
added more tests
insumity Feb 27, 2024
f6397ad
more fixes
insumity Feb 27, 2024
bee8c10
nit fixes
insumity Feb 27, 2024
abb4abc
cleaning up
insumity Feb 27, 2024
30f2061
increase downtime by one block
insumity Feb 28, 2024
e986692
fix logs
insumity Feb 28, 2024
8aa1dc9
took into account Marius' comments
insumity Feb 28, 2024
16958bc
tiny fixes
insumity Feb 28, 2024
513a75d
Update x/ccv/provider/keeper/params.go
insumity Feb 29, 2024
2208cd4
use Bech32 addresses as keys for maps
insumity Feb 29, 2024
914840c
refactor nextBlocks(epoch) to nextEpoch
insumity Mar 1, 2024
3164a09
Start adding epochs
p-offtermatt Mar 1, 2024
4fe6b75
Adjust tests for epochs
p-offtermatt Mar 1, 2024
af9df64
Use invariant script instead of handwriting Makefile
p-offtermatt Mar 1, 2024
614ff82
Fix key assignment valset invariant
p-offtermatt Mar 1, 2024
a16c49a
Add better run_invariants script
p-offtermatt Mar 1, 2024
bf9552b
Start adding epochs from trace into driver
p-offtermatt Mar 1, 2024
f7b38da
Remove new block creation during consumer chain setup
p-offtermatt Mar 4, 2024
9290a21
Adjust model for epochs
p-offtermatt Mar 4, 2024
ccf24af
Take into account comments
p-offtermatt Mar 6, 2024
9dba765
Merge branch 'feat/epochs' into ph/mbt-epochs
p-offtermatt Mar 8, 2024
9f1d40c
Revert changes to actions.go
p-offtermatt Mar 8, 2024
4edaae2
Revert changes to x/
p-offtermatt Mar 8, 2024
edf8cd0
Remove unused listMul
p-offtermatt Mar 8, 2024
769dc15
Advance time by epochLength instead of 1 second
p-offtermatt Mar 8, 2024
46e8e4e
Indent condition and clarify EndProviderEpoch
p-offtermatt Mar 8, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 2 additions & 4 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -133,10 +133,8 @@ test-trace:
# Note: this is *not* using the Quint models to test the system,
# this tests/verifies the Quint models *themselves*.
verify-models:
quint test tests/mbt/model/ccv_test.qnt;\
quint test tests/mbt/model/ccv_model.qnt;\
quint run --invariant "all{ValidatorUpdatesArePropagatedInv,ValidatorSetHasExistedInv,SameVscPacketsInv,MatureOnTimeInv,EventuallyMatureOnProviderInv}" tests/mbt/model/ccv_model.qnt --max-steps 200 --max-samples 200;\
quint run --invariant "all{ValidatorUpdatesArePropagatedKeyAssignmentInv,ValidatorSetHasExistedKeyAssignmentInv,SameVscPacketsKeyAssignmentInv,MatureOnTimeInv,EventuallyMatureOnProviderInv,KeyAssignmentRulesInv}" tests/mbt/model/ccv_model.qnt --step stepKeyAssignment --max-steps 200 --max-samples 200
cd tests/mbt/model;\
../run_invariants.sh



Expand Down
4 changes: 4 additions & 0 deletions tests/mbt/driver/core.go
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,10 @@ func (s *Driver) providerChain() *ibctesting.TestChain {
return s.chain("provider")
}

// providerHeight returns the height recorded in the provider chain's
// current (in-progress) block header.
func (s *Driver) providerHeight() int64 {
	provider := s.providerChain()
	return provider.CurrentHeader.Height
}

func (s *Driver) providerCtx() sdk.Context {
return s.providerChain().GetContext()
}
Expand Down
61 changes: 48 additions & 13 deletions tests/mbt/driver/mbt_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -179,17 +179,25 @@ func RunItfTrace(t *testing.T, path string) {
nodes[i] = addressMap[valName]
}

// very hacky: the system produces a lot of extra blocks, e.g. when setting up consumer chains, when updating clients, etc.
// to be able to compare the model to the system, we make the blocks per epoch a very large number (such that an epoch never ends naturally in the system while running the trace)
// When an epoch in the model ends (which we can detect by the height modulo the epoch length), we produce many, many blocks in the system, such that an epoch actually ends.
blocksPerEpoch := int64(200)
modelBlocksPerEpoch := params["BlocksPerEpoch"].Value.(int64)

driver := newDriver(t, nodes, valNames)
driver.DriverStats = &stats

driver.setupProvider(modelParams, valSet, signers, nodes, valNames)

// set `BlocksPerEpoch` to 10: a reasonable small value greater than 1 that prevents waiting for too
// many blocks and slowing down the tests
providerParams := driver.providerKeeper().GetParams(driver.providerCtx())
providerParams.BlocksPerEpoch = 10
providerParams.BlocksPerEpoch = blocksPerEpoch
driver.providerKeeper().SetParams(driver.providerCtx(), providerParams)

// begin enough blocks to end the first epoch
for i := int64(1); i < blocksPerEpoch; i++ {
driver.endAndBeginBlock("provider", 1*time.Nanosecond)
}

// remember the time offsets to be able to compare times to the model
// this is necessary because the system needs to do many steps to initialize the chains,
// which is abstracted away in the model
Expand All @@ -200,8 +208,16 @@ func RunItfTrace(t *testing.T, path string) {
t.Log("Reading the trace...")

for index, state := range trace.States {
t.Log("Height modulo epoch length:", driver.providerChain().CurrentHeader.Height%blocksPerEpoch)
t.Log("Model height modulo epoch length:", ProviderHeight(state.VarValues["currentState"].Value.(itf.MapExprType))%modelBlocksPerEpoch)
t.Logf("Reading state %v of trace %v", index, path)

// store the height of the provider state before each step.
// The height should only pass an epoch when it passes an epoch in the model, too,
// otherwise there is an error, and blocksPerEpoch needs to be increased.
// See the comment for blocksPerEpoch above.
heightBeforeStep := driver.providerHeight()

trace := state.VarValues["trace"].Value.(itf.ListExprType)
// lastAction will get the last action that was executed so far along the model trace,
// i.e. the action we should perform before checking model vs actual system equivalence
Expand Down Expand Up @@ -239,22 +255,33 @@ func RunItfTrace(t *testing.T, path string) {
stats.numStartedChains += len(consumersToStart)
stats.numStops += len(consumersToStop)

// get the block height in the model
modelHeight := ProviderHeight(currentModelState)

if modelHeight%modelBlocksPerEpoch == 0 {
// in the model, an epoch ends, so we need to produce blocks in the system to get the actual height
// to end an epoch with the first of the two subsequent calls to endAndBeginBlock below
actualHeight := driver.providerHeight()

heightInEpoch := actualHeight % blocksPerEpoch

// produce blocks until the next block ends the epoch
for i := heightInEpoch; i < blocksPerEpoch; i++ {
driver.endAndBeginBlock("provider", 1*time.Nanosecond)
}
}

// we need at least 2 blocks, because for a packet sent at height H, the receiving chain
// needs a header of height H+1 to accept the packet
// so, we do `blocksPerEpoch` time advancements with a very small increment,
// and then increment the rest of the time
// so, we do two blocks, one with a very small increment,
// and then another to increment the rest of the time
runningConsumersBefore := driver.runningConsumers()

// going through `blocksPerEpoch` blocks to take into account an epoch
blocksPerEpoch := driver.providerKeeper().GetBlocksPerEpoch(driver.providerCtx())
for i := int64(0); i < blocksPerEpoch; i = i + 1 {
driver.endAndBeginBlock("provider", 1*time.Nanosecond)
}
driver.endAndBeginBlock("provider", 1*time.Nanosecond)
for _, consumer := range driver.runningConsumers() {
UpdateProviderClientOnConsumer(t, driver, consumer.ChainId)
}

driver.endAndBeginBlock("provider", time.Duration(timeAdvancement)*time.Second-time.Nanosecond*time.Duration(blocksPerEpoch))
driver.endAndBeginBlock("provider", time.Duration(timeAdvancement)*time.Second-1*time.Nanosecond)

runningConsumersAfter := driver.runningConsumers()

Expand Down Expand Up @@ -436,6 +463,14 @@ func RunItfTrace(t *testing.T, path string) {

stats.EnterStats(driver)

// should not have ended an epoch, unless we also ended an epoch in the model
heightAfterStep := driver.providerHeight()

if heightBeforeStep/blocksPerEpoch != heightAfterStep/blocksPerEpoch {
// we changed epoch during this step, so ensure that the model also changed epochs
require.True(t, ProviderHeight(state.VarValues["currentState"].Value.(itf.MapExprType))%modelBlocksPerEpoch == 0, "Height in model did not change epoch, but did in system. increase blocksPerEpoch in the system")
}

t.Logf("State %v of trace %v is ok!", index, path)
}
t.Log("🟢 Trace is ok!")
Expand Down
4 changes: 4 additions & 0 deletions tests/mbt/driver/model_viewer.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,10 @@ func RunningTime(curStateExpr itf.MapExprType, chain string) int64 {
return ChainState(curStateExpr, chain)["runningTimestamp"].Value.(int64)
}

// ProviderHeight extracts the provider chain's current block height from
// the given model (ITF trace) state expression.
func ProviderHeight(curStateExpr itf.MapExprType) int64 {
	providerState := ProviderState(curStateExpr)
	chainState := providerState["chainState"].Value.(itf.MapExprType)
	height := chainState["currentBlockHeight"].Value.(int64)
	return height
}

// PacketQueue returns the queued packets between sender and receiver.
// Either sender or receiver need to be the provider.
func PacketQueue(curStateExpr itf.MapExprType, sender, receiver string) itf.ListExprType {
Expand Down
16 changes: 0 additions & 16 deletions tests/mbt/driver/setup.go
Original file line number Diff line number Diff line change
Expand Up @@ -397,22 +397,6 @@ func (s *Driver) ConfigureNewPath(consumerChain, providerChain *ibctesting.TestC
// their channel, and are ready for anything to happen.
s.consumerKeeper(consumerChainId).SetProviderChannel(s.ctx(consumerChainId), consumerEndPoint.ChannelID)

// Commit a block on both chains, giving us two committed headers from
// the same time and height. This is the starting point for all our
// data driven testing.
lastConsumerHeader, _ := simibc.EndBlock(consumerChain, func() {})
lastProviderHeader, _ := simibc.EndBlock(providerChain, func() {})

// Get ready to update clients.
simibc.BeginBlock(providerChain, 5)
simibc.BeginBlock(consumerChain, 5)

// Update clients to the latest header.
err = simibc.UpdateReceiverClient(consumerEndPoint, providerEndPoint, lastConsumerHeader, false)
require.NoError(s.t, err, "Error updating client on consumer for chain %v", consumerChain.ChainID)
err = simibc.UpdateReceiverClient(providerEndPoint, consumerEndPoint, lastProviderHeader, false)
require.NoError(s.t, err, "Error updating client on provider for chain %v", consumerChain.ChainID)

// path is ready to go
return path
}
Expand Down
46 changes: 27 additions & 19 deletions tests/mbt/model/ccv.qnt
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,8 @@ module ccv_types {

// the running timestamp of the current block (that will be put on chain when the block is ended)
runningTimestamp: Time,

currentBlockHeight: int,
}

// utility function: returns a chain state that is initialized minimally.
Expand All @@ -67,6 +69,7 @@ module ccv_types {
currentValidatorSet: Map(),
lastTimestamp: -1, // last timestamp -1 means that in the model, there was no block committed on chain yet.
runningTimestamp: 0,
currentBlockHeight: 0
}

// Defines the current state of the provider chain. Essentially, all information here is stored by the provider on-chain (or could be derived purely by information that is on-chain).
Expand All @@ -86,11 +89,8 @@ module ccv_types {
// Stores VscPackets which have been sent but where the provider has *not received a response yet*.
sentVscPacketsToConsumer: Chain -> List[VscPacket],

// stores whether, in this block, the validator set has changed.
// this is needed because the validator set might be considered to have changed, even though
// it is still technically identical at our level of abstraction, e.g. a validator power change on the provider
// might leave the validator set the same because a delegation and undelegation cancel each other out.
providerValidatorSetChangedInThisBlock: bool,
// stores for which consumer chains, in this epoch, the validator set is considered to have changed and we thus need to send a VscPacket to the consumer chains.
consumersWithPowerChangesInThisEpoch: Set[Chain],

// stores, for each consumer chain, its current status -
// not consumer, running, or stopped
Expand All @@ -110,7 +110,7 @@ module ccv_types {
consumerAddrToValidator: Chain -> (ConsumerAddr -> Node),

// For every consumer chain, stores whether the key assignment for the consumer chain has changed in this block.
consumersWithAddrAssignmentChangesInThisBlock: Set[Chain],
consumersWithAddrAssignmentChangesInThisEpoch: Set[Chain],

// the history of validator sets on the provider, but with the key assignments applied.
// This is needed to check invariants about the validator set when key assignments are in play.
Expand All @@ -130,15 +130,15 @@ module ccv_types {
outstandingPacketsToConsumer: Map(),
receivedMaturations: Set(),
sentVscPacketsToConsumer: Map(),
providerValidatorSetChangedInThisBlock: false,
consumersWithPowerChangesInThisEpoch: Set(),
consumerStatus: Map(),
runningVscId: 0,
validatorToConsumerAddr: Map(),
keyAssignedValSetHistory: Map(),
consumerAddrToValidator: Map(),
consumerAddrsToPrune: Map(),
keyAssignmentsForVSCPackets: Map(),
consumersWithAddrAssignmentChangesInThisBlock: Set()
consumersWithAddrAssignmentChangesInThisEpoch: Set()
}


Expand Down Expand Up @@ -271,6 +271,10 @@ module ccv {
// they expire and the channel will be closed.
const TrustingPeriodPerChain: Chain -> int

// The number of blocks in an epoch.
// VscPackets are only sent to consumer chains at the end of every epoch.
const BlocksPerEpoch: int

// ===================
// PROTOCOL LOGIC contains the meat of the protocol
// functions here roughly correspond to API calls that can be triggered from external sources
Expand All @@ -294,7 +298,7 @@ module ccv {
} else {
// set the validator set changed flag
val newProviderState = currentState.providerState.with(
"providerValidatorSetChangedInThisBlock", true
"consumersWithPowerChangesInThisEpoch", getRunningConsumers(currentState.providerState)
)
pure val tmpState = currentState.with(
"providerState", newProviderState
Expand Down Expand Up @@ -455,10 +459,16 @@ module ccv {

// send vsc packets (will be a noop if no sends are necessary)
val providerStateAfterSending =
providerStateAfterTimeAdvancement.sendVscPackets(
currentProviderState.chainState.runningTimestamp,
CcvTimeout.get(PROVIDER_CHAIN)
)
// if currentBlockHeight is a multiple of BlocksPerEpoch, send VscPackets
if (providerStateAfterTimeAdvancement.chainState.currentBlockHeight % BlocksPerEpoch == 0) {
providerStateAfterTimeAdvancement.sendVscPackets(
currentProviderState.chainState.runningTimestamp,
CcvTimeout.get(PROVIDER_CHAIN)
)
} else {
// otherwise, just do a noop
providerStateAfterTimeAdvancement
}


// start/stop chains
Expand All @@ -471,8 +481,6 @@ module ccv {
val err = res._2
val providerStateAfterConsumerAdvancement = providerStateAfterSending.with(
"consumerStatus", newConsumerStatus
).with(
"providerValidatorSetChangedInThisBlock", false
)

if (err != "") {
Expand Down Expand Up @@ -609,17 +617,17 @@ module ccv {
// check whether the validator has positive power
pure val provValSet = currentState.providerState.chainState.currentValidatorSet
pure val provValPower = if (provValSet.keys().contains(providerNode)) provValSet.get(providerNode) else 0
pure val consumersWithAddrAssignmentChangesInThisBlock =
pure val consumersWithAddrAssignmentChangesInThisEpoch =
if (provValPower > 0) {
// if the consumer has positive power, the relevant key assignment for the consumer changed
currentState.providerState.consumersWithAddrAssignmentChangesInThisBlock.union(Set(consumer))
currentState.providerState.consumersWithAddrAssignmentChangesInThisEpoch.union(Set(consumer))
} else {
// otherwise, the consumer doesn't need to know about the change, so no change
currentState.providerState.consumersWithAddrAssignmentChangesInThisBlock
currentState.providerState.consumersWithAddrAssignmentChangesInThisEpoch
}
pure val tmpStateAfterKeyAssignmentReplacement = tmpState.with(
"providerState", tmpState.providerState.with(
"consumersWithAddrAssignmentChangesInThisBlock", consumersWithAddrAssignmentChangesInThisBlock
"consumersWithAddrAssignmentChangesInThisEpoch", consumersWithAddrAssignmentChangesInThisEpoch
)
)

Expand Down
Loading
Loading