Skip to content
This repository has been archived by the owner on Aug 2, 2022. It is now read-only.

Commit

Permalink
Merge pull request #10288 from EOSIO/bdj__privacy-test-case-1_EPE-832
Browse files Browse the repository at this point in the history
Privacy Test Case #1
  • Loading branch information
brianjohnson5972 authored Apr 23, 2021
2 parents 37bc68a + cc4d02f commit 465616c
Show file tree
Hide file tree
Showing 4 changed files with 65 additions and 72 deletions.
2 changes: 1 addition & 1 deletion tests/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,7 @@ add_test(NAME light_validation_sync_test COMMAND tests/light_validation_sync_tes
set_property(TEST light_validation_sync_test PROPERTY LABELS nonparallelizable_tests)
add_test(NAME eosio_blocklog_prune_test COMMAND tests/eosio_blocklog_prune_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
set_property(TEST eosio_blocklog_prune_test PROPERTY LABELS nonparallelizable_tests)
add_test(NAME privacy_startup_network COMMAND tests/privacy_startup_network.py -p 1 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME privacy_startup_network COMMAND tests/privacy_startup_network.py -p 2 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
set_property(TEST privacy_startup_network PROPERTY LABELS nonparallelizable_tests)
add_test(NAME privacy_simple_network COMMAND tests/privacy_simple_network.py -p 2 -n 3 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
set_property(TEST privacy_simple_network PROPERTY LABELS nonparallelizable_tests)
Expand Down
20 changes: 19 additions & 1 deletion tests/SecurityGroup.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,17 @@ def __init__(self, nonParticipants, contractAccount, defaultNode=None, minAddRem
self.participants = []
self.contractAccount = contractAccount
assert len(nonParticipants) > 0
# copy over all the running processes
self.nonParticipants = copy.copy(nonParticipants)
if Utils.Debug: Utils.Print("Creating SecurityGroup with the following nonParticipants: []".format(SecurityGroup.createAction(self.nonParticipants)))
self.defaultNode = defaultNode if defaultNode else nonParticipants[0]
def findDefault(nodes):
for node in nodes:
if node.pid:
return node

Utils.errorExit("SecurityGroup is being constructed with no running nodes, there needs to be at least one running node")

self.defaultNode = defaultNode if defaultNode else findDefault(self.nonParticipants)
self.publishProcessNum = minAddRemEntriesToPublish
if activateAndPublish:
SecurityGroup.activateFeature(self.defaultNode)
Expand Down Expand Up @@ -110,9 +118,14 @@ def copyIfNeeded(nodes):
def verifyParticipantsTransactionFinalized(self, transId):
    """Wait until every currently running participant node reports transId finalized.

    Exits the test run (Utils.errorExit) if any running participant fails to
    finalize the transaction. Also asserts that at least one participant is
    actually running, since otherwise the verification would be vacuous.
    """
    Utils.Print("Verify participants are in sync")
    assert transId
    atLeastOne = False
    for part in self.participants:
        if part.pid is None:
            # participant process is not running; nothing to verify on it
            continue
        atLeastOne = True
        # BUG FIX: the failure message previously formatted the undefined
        # name "trans", which would raise NameError instead of reporting
        # the transaction id; use transId.
        if part.waitForTransFinalization(transId) == None:
            Utils.errorExit("Transaction: {}, never finalized".format(transId))
    assert atLeastOne, "None of the participants are currently running, no reason to call verifyParticipantsTransactionFinalized"

# verify that the block for the transaction ID is never finalized in nonParticipants
def verifyNonParticipants(self, transId):
Expand All @@ -130,11 +143,16 @@ def verifyNonParticipants(self, transId):
# verify each nonParticipant in the list has not advanced its lib to the publish block, since the block that would cause it to become finalized would
# never have been forwarded to a nonParticipant
for nonParticipant in self.nonParticipants:
if nonParticipant.pid is None:
continue
nonParticipantPostLIB = nonParticipant.getBlockNum(blockType=BlockType.lib)
assert nonParticipantPostLIB < publishBlock, "Participants not in security group should not have advanced LIB to {}, but it has advanced to {}".format(publishBlock, nonParticipantPostLIB)
nonParticipantHead = nonParticipant.getBlockNum()
assert nonParticipantHead < producerHead, "Participants (that are not producers themselves) should not advance head to {}, but it has advanced to {}".format(producerHead, nonParticipantHead)

def getLatestPublishTransId(self):
    """Return the transaction id of the most recent publish action (self.publishTrans)."""
    latestPublish = self.publishTrans
    return Node.getTransId(latestPublish)

# verify that the participants' and nonParticipants' nodes are consistent based on the publish transaction
def verifySecurityGroup(self, publishTrans = None):
if publishTrans is None:
Expand Down
9 changes: 6 additions & 3 deletions tests/privacy_simple_network.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,13 @@
import time

###############################################################
# privacy_startup_network
# privacy_simple_network
#
# General test for Privacy to verify TLS connections, and slowly adding participants to the security group and verifying
# how blocks and transactions are sent/not sent.
# Implements Privacy Test Case #2 (and other misc scenarios). It creates a simple network of mesh connected
# producers and non-producer nodes. It adds the producers to the security group and verifies they are in
# sync and the non-producers are not. Then, one by one it adds the non-producing nodes to the security
# group, and verifies that the correct nodes are in sync and the others are not. It also repeatedly changes
# the security group, not letting it finalize, to verify Test Case #2.
#
###############################################################

Expand Down
106 changes: 39 additions & 67 deletions tests/privacy_startup_network.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
from Node import ReturnType
from TestHelper import TestHelper

import copy
import decimal
import re
import signal
Expand All @@ -18,8 +19,17 @@
###############################################################
# privacy_startup_network
#
# General test for Privacy to verify TLS connections, and slowly adding participants to the security group and verifying
# how blocks and transactions are sent/not sent.
# Script implements Privacy Test Case #1. It pairs up producers with p2p connections with relay nodes
# and the relay nodes connected to at least 2 or more API nodes. The producers and relay nodes are
# added to the security Group and then it validates they are in sync and the api nodes do not receive
# blocks. Then it adds all but one api nodes and verifies they are in sync with producers, then all
# nodes are added and verifies that all nodes are in sync.
#
# NOTE: A relay node is a node that an entity running a producer uses to prevent outside nodes from
# affecting the producing node. An API Node is a node that is setup for the general community to
# connect to and will have more p2p connections. This script doesn't necessarily setup the API nodes
# the way that they are setup in the real world, but it is referencing them this way to explain what
# the test is intending to verify.
#
###############################################################

Expand All @@ -36,8 +46,10 @@
apiNodes=2 # minimum number of apiNodes that will be used in this test
minTotalNodes=pnodes+relayNodes+apiNodes
totalNodes=args.n if args.n >= minTotalNodes else minTotalNodes
if totalNodes > minTotalNodes:
if totalNodes >= minTotalNodes:
apiNodes += totalNodes - minTotalNodes
else:
Utils.Print("Requested {} total nodes, but since the minumum number of API nodes is {}, there will be {} total nodes".format(args.n, apiNodes, totalNodes))

Utils.Debug=args.v
dumpErrorDetails=args.dump_error_details
Expand Down Expand Up @@ -74,8 +86,12 @@
apiNodeNums = [x for x in range(firstApiNodeNum, totalNodes)]
for producerNum in range(pnodes):
pairedRelayNodeNum = pnodes + producerNum
# p2p connection between producer and relay
topo[producerNum] = [pairedRelayNodeNum]
topo[pairedRelayNodeNum] = apiNodeNums
# p2p connections between relays
topo[pairedRelayNodeNum] = [x + producerNum for x in range(pnodes) if x != producerNum]
# p2p connections between relay and all api nodes
topo[pairedRelayNodeNum].extend(apiNodeNums)
Utils.Print("topo: {}".format(json.dumps(topo, indent=4, sort_keys=True)))

# adjust prodCount to ensure that lib trails more than 1 block behind head
Expand All @@ -94,69 +110,25 @@
relays = [cluster.getNode(pnodes + x) for x in range(pnodes) ]
apiNodes = [cluster.getNode(x) for x in apiNodeNums]

# Create/initialize a new on-chain account via the first producer node, then
# import the account's key into the "ignition" wallet so later test actions
# can sign with it. Relies on the module-level `producers` and `cluster`.
def createAccount(newAcc):
    producers[0].createInitializeAccount(newAcc, cluster.eosioAccount)
    ignWallet = cluster.walletMgr.create("ignition") # will actually just look up the wallet
    cluster.walletMgr.importKey(newAcc, ignWallet)

numAccounts = 4
testAccounts = Cluster.createAccountKeys(numAccounts)
accountPrefix = "testaccount"
for i in range(numAccounts):
testAccount = testAccounts[i]
testAccount.name = accountPrefix + str(i + 1)
createAccount(testAccount)

blockProducer = None

def verifyInSync(producerNum):
    """Verify all producer and API nodes are in sync with producers[producerNum].

    Checks that every other producer node and every API node has received the
    reference producer's head block and has advanced its lib to at least the
    reference producer's lib. On the first call, records the producer name of
    the observed head block in the module-level `blockProducer`.
    """
    Utils.Print("Ensure all nodes are in-sync")
    lib = producers[producerNum].getInfo()["last_irreversible_block_num"]
    headBlockNum = producers[producerNum].getBlockNum()
    headBlock = producers[producerNum].getBlock(headBlockNum)
    global blockProducer
    if blockProducer is None:
        # remember which producer made the first observed head block
        blockProducer = headBlock["producer"]
    Utils.Print("headBlock: {}".format(json.dumps(headBlock, indent=4, sort_keys=True)))
    headBlockId = headBlock["id"]
    for prod in producers:
        if prod == producers[producerNum]:
            continue

        assert prod.waitForBlock(headBlockNum, timeout = 10, reportInterval = 1) != None, "Producer node failed to get block number {}".format(headBlockNum)
        prod.getBlock(headBlockId) # if it isn't there it will throw an exception
        # BUG FIX: the failure message formatted node.getInfo(), but "node" is
        # not defined in this loop (it is the api-node loop variable below),
        # so a failed assert would raise NameError; report prod's lib instead.
        assert prod.waitForBlock(lib, blockType=BlockType.lib), \
            "Producer node is failing to advance its lib ({}) with producer {} ({})".format(prod.getInfo()["last_irreversible_block_num"], producerNum, lib)
    for node in apiNodes:
        assert node.waitForBlock(headBlockNum, timeout = 10, reportInterval = 1) != None, "API node failed to get block number {}".format(headBlockNum)
        node.getBlock(headBlockId) # if it isn't there it will throw an exception
        assert node.waitForBlock(lib, blockType=BlockType.lib), \
            "API node is failing to advance its lib ({}) with producer {} ({})".format(node.getInfo()["last_irreversible_block_num"], producerNum, lib)

Utils.Print("Ensure all nodes are in-sync")
assert node.waitForBlock(lib + 1, blockType=BlockType.lib, reportInterval = 1) != None, "Producer node failed to advance lib ahead one block to: {}".format(lib + 1)

verifyInSync(producerNum=0)

featureDict = producers[0].getSupportedProtocolFeatureDict()
Utils.Print("feature dict: {}".format(json.dumps(featureDict, indent=4, sort_keys=True)))

Utils.Print("act feature dict: {}".format(json.dumps(producers[0].getActivatedProtocolFeatures(), indent=4, sort_keys=True)))
timeout = ( pnodes * 12 / 2 ) * 2 # (number of producers * blocks produced / 0.5 blocks per second) * 2 rounds
producers[0].waitUntilBeginningOfProdTurn(blockProducer, timeout=timeout)
feature = "SECURITY_GROUP"
producers[0].activateFeatures([feature])
assert producers[0].containsFeatures([feature]), "{} feature was not activated".format(feature)

if sanityTest:
testSuccessful=True
exit(0)

# Publish the security_group_test contract wasm to the given account via the
# first producer node; returns the publish transaction.
# NOTE(review): "Print" is presumably an alias for Utils.Print defined earlier
# in this file (not visible here) -- confirm, otherwise this raises NameError.
def publishContract(account, wasmFile, waitForTransBlock=False):
    Print("Publish contract")
    return producers[0].publishContract(account, "unittests/test-contracts/security_group_test/", wasmFile, abiFile=None, waitForTransBlock=waitForTransBlock)

publishContract(testAccounts[0], 'security_group_test.wasm', waitForTransBlock=True)
securityGroup = cluster.getSecurityGroup()
cluster.reportInfo()

Utils.Print("Add all producers and relay nodes to security group")
prodsAndRelays = copy.copy(producers)
prodsAndRelays.extend(relays)
securityGroup.editSecurityGroup(prodsAndRelays)
securityGroup.verifySecurityGroup()

allButLastApiNodes = apiNodes[:-1]
lastApiNode = [apiNodes[-1]]

Utils.Print("Add all but last API node and verify they receive blocks and the last API node does not")
securityGroup.editSecurityGroup(addNodes=allButLastApiNodes)
securityGroup.verifySecurityGroup()

Utils.Print("Add the last API node and verify it receives blocks")
securityGroup.editSecurityGroup(addNodes=lastApiNode)
securityGroup.verifySecurityGroup()

testSuccessful=True
finally:
Expand Down

0 comments on commit 465616c

Please sign in to comment.