diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index e7fc99911c4..7e673e289f6 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -119,7 +119,7 @@ add_test(NAME eosio_blocklog_prune_test COMMAND tests/eosio_blocklog_prune_test.
 set_property(TEST eosio_blocklog_prune_test PROPERTY LABELS nonparallelizable_tests)
 add_test(NAME privacy_startup_network COMMAND tests/privacy_startup_network.py -p 1 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_property(TEST privacy_startup_network PROPERTY LABELS nonparallelizable_tests)
-add_test(NAME privacy_simple_network COMMAND tests/privacy_simple_network.py -p 2 -n 3 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+add_test(NAME privacy_simple_network COMMAND tests/privacy_simple_network.py -p 2 -n 4 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_property(TEST privacy_simple_network PROPERTY LABELS nonparallelizable_tests)

 # Long running tests
diff --git a/tests/Cluster.py b/tests/Cluster.py
index fb879ab2688..0536f1b057f 100644
--- a/tests/Cluster.py
+++ b/tests/Cluster.py
@@ -538,6 +538,8 @@ def connectGroup(group, producerNodes, bridgeNodes) :
         if onlyBios:
             self.nodes=[biosNode]

+        self.totalNodes = totalNodes
+
         # ensure cluster node are inter-connected by ensuring everyone has block 1
         Utils.Print("Cluster viability smoke test. Validate every cluster node has block 1. ")
         if not self.waitOnClusterBlockNumSync(1):
@@ -1032,24 +1034,6 @@ def parseProducerKeys(configFile, nodeName):
         return producerKeys

-    @staticmethod
-    def parseProducers(nodeNum):
-        """Parse node config file for producers."""
-
-        configFile=Utils.getNodeConfigDir(nodeNum, "config.ini")
-        if Utils.Debug: Utils.Print("Parsing config file %s" % configFile)
-        configStr=None
-        with open(configFile, 'r') as f:
-            configStr=f.read()
-
-        pattern=r"^\s*producer-name\s*=\W*(\w+)\W*$"
-        producerMatches=re.findall(pattern, configStr, re.MULTILINE)
-        if producerMatches is None:
-            if Utils.Debug: Utils.Print("Failed to find producers.")
-            return None
-
-        return producerMatches
-
     @staticmethod
     def parseClusterKeys(totalNodes):
         """Parse cluster config file.
        Updates producer keys data members."""
@@ -1877,3 +1861,23 @@ def verifyInSync(self, sourceNodeNum=0, specificNodes=None):
         if error:
             self.reportInfo()
             Utils.errorExit(error)
+
+    def getParticipantNum(self, nodeToIdentify):
+        num = 0
+        for node in self.nodes:
+            if node == nodeToIdentify:
+                return num
+            num += 1
+        assert nodeToIdentify == self.biosNode
+        return self.totalNodes
+
+    def getProducingNodeIndex(self, blockProducer):
+        featureProdNum = 0
+        while featureProdNum < len(self.nodes):
+            if blockProducer in self.nodes[featureProdNum].getProducers():
+                return featureProdNum
+
+            featureProdNum += 1
+
+        assert blockProducer in self.biosNode.getProducers(), "Checked all nodes but could not find producer: {}".format(blockProducer)
+        return "bios"
diff --git a/tests/Node.py b/tests/Node.py
index 61c69991f36..dc716e1b368 100644
--- a/tests/Node.py
+++ b/tests/Node.py
@@ -1200,7 +1200,7 @@ def getBlockProducerByNum(self, blockNum, timeout=None, waitForBlock=True, exitO
         block=self.getBlock(blockNum, exitOnError=exitOnError)
         return Node.getBlockAttribute(block, "producer", blockNum, exitOnError=exitOnError)

-    def getBlockProducer(self, timeout=None, waitForBlock=True, exitOnError=True, blockType=BlockType.head):
+    def getBlockProducer(self, timeout=None, exitOnError=True, blockType=BlockType.head):
         blockNum=self.getBlockNum(blockType=blockType)
         block=self.getBlock(blockNum, exitOnError=exitOnError, blockType=blockType)
         return Node.getBlockAttribute(block, "producer", blockNum, exitOnError=exitOnError)
@@ -1453,7 +1453,8 @@ def isDesiredProdTurn():
         return beginningOfProdTurnHead

     # Require producer_api_plugin
-    def activateFeatures(self, features):
+    def activateFeatures(self, features, blocksToAdvance=2):
+        assert blocksToAdvance >= 0
         featureDigests = []
         for feature in features:
             protocolFeatureDigestDict = self.getSupportedProtocolFeatureDict()
@@ -1465,16 +1466,47 @@
         self.scheduleProtocolFeatureActivations(featureDigests)

         # Wait for the next block to be produced so the scheduled protocol feature is activated
-        assert self.waitForHeadToAdvance(blocksToAdvance=2), print("ERROR: TIMEOUT WAITING FOR activating features: {}".format(",".join(features)))
+        assert self.waitForHeadToAdvance(blocksToAdvance=blocksToAdvance), "ERROR: TIMEOUT WAITING FOR activating features: {}".format(",".join(features))
+
+    def activateAndVerifyFeatures(self, features):
+        self.activateFeatures(features, blocksToAdvance=0)
+        headBlockNum = self.getBlockNum()
+        blockNum = headBlockNum
+        producers = {}
+        lastProducer = None
+        while True:
+            block = self.getBlock(blockNum)
+            blockHeaderState = self.getBlockHeaderState(blockNum)
+            if self.containsFeatures(features, blockHeaderState):
+                return
+
+            producer = block["producer"]
+            producers[producer] = producers.get(producer, 0) + 1
+            assert lastProducer == producer or producers[producer] == 1, \
+                "We have already cycled through a complete production round, so the features should have been activated by now. \
+                Initial block num: {}, looking at block num: {}".format(headBlockNum, blockNum)
+            lastProducer = producer
+
+            # the features should be in a block signed by this node's own producers once it is at least 2 blocks after we sent the activate
+            minBlocksForGuarantee = 2
+            assert producer not in self.getProducers() or blockNum - headBlockNum < minBlocksForGuarantee, \
+                "It is {} blocks past the block when we activated the features and block num: {} was produced by this \
+                node, so the features should have been set.".format(blockNum - headBlockNum, blockNum)
+
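+            # advance at least one block and re-read the head; activation persists in
+            # every later block, so skipping ahead of blockNum + 1 cannot miss it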
+            self.waitForBlock(blockNum + 1)
+            blockNum = self.getBlockNum()
+

     # Require producer_api_plugin
     def activatePreactivateFeature(self):
         return self.activateFeatures(["PREACTIVATE_FEATURE"])

-    def containsFeatures(self, features):
+    def containsFeatures(self, features, blockHeaderState=None):
         protocolFeatureDict = self.getSupportedProtocolFeatureDict()
-        blockHeaderState = self.getLatestBlockHeaderState()
-        assert blockHeaderState, "blockHeaderState should not be empty"
+        if blockHeaderState is None:
+            blockHeaderState = self.getLatestBlockHeaderState()
         for feature in features:
             featureDigest = protocolFeatureDict[feature]["feature_digest"]
             assert featureDigest, "{}'s Digest should not be empty".format(feature)
@@ -1520,20 +1552,26 @@ def preactivateAllBuiltinProtocolFeature(self):

     def getLatestBlockHeaderState(self):
         headBlockNum = self.getHeadBlockNum()
-        for i in range(10):
-            cmdDesc = "get block {} --header-state".format(headBlockNum)
-            latestBlockHeaderState = self.processCleosCmd(cmdDesc, cmdDesc)
-            Utils.Print("block num: {}, block state: {}, head: {}".format(headBlockNum, latestBlockHeaderState, self.getHeadBlockNum()))
-            if latestBlockHeaderState:
-                return latestBlockHeaderState
-            time.sleep(1)
-        return None
-
-    def getActivatedProtocolFeatures(self):
-        latestBlockHeaderState = self.getLatestBlockHeaderState()
-        if "activated_protocol_features" not in latestBlockHeaderState or "protocol_features" not in latestBlockHeaderState["activated_protocol_features"]:
+        return self.getBlockHeaderState(headBlockNum)
+
+    def getBlockHeaderState(self, blockNum, errorOnNone=True):
+        cmdDesc = "get block {} --header-state".format(blockNum)
+        blockHeaderState = self.processCleosCmd(cmdDesc, cmdDesc)
+        if blockHeaderState is None and errorOnNone:
+            info = self.getInfo()
+            lib = info["last_irreversible_block_num"]
+            head = info["head_block_num"]
+            assert head == lib + 1, "getBlockHeaderState failed to retrieve the header state for block num: {}. This should be investigated.".format(blockNum)
+            Utils.errorExit("Called getBlockHeaderState, which can only retrieve blocks in the reversible database, but the test setup only has one producer so there" +
+                            " is only 1 block in the reversible database. Test should be redesigned to acquire this information via another interface.")
+        return blockHeaderState
+
+    def getActivatedProtocolFeatures(self, blockHeaderState=None):
+        if blockHeaderState is None:
+            blockHeaderState = self.getLatestBlockHeaderState()
+        if "activated_protocol_features" not in blockHeaderState or "protocol_features" not in blockHeaderState["activated_protocol_features"]:
-            Utils.errorExit("getLatestBlockHeaderState did not return expected output, should contain [\"activated_protocol_features\"][\"protocol_features\"]: {}".format(latestBlockHeaderState))
-        return latestBlockHeaderState["activated_protocol_features"]["protocol_features"]
+            Utils.errorExit("getBlockHeaderState did not return expected output, should contain [\"activated_protocol_features\"][\"protocol_features\"]: {}".format(blockHeaderState))
+        return blockHeaderState["activated_protocol_features"]["protocol_features"]

     def modifyBuiltinPFSubjRestrictions(self, featureCodename, subjectiveRestriction={}):
         jsonPath = os.path.join(Utils.getNodeConfigDir(self.nodeId),
@@ -1687,3 +1725,24 @@ def waitForIrreversibleBlockProducedBy(self, producer, startBlockNum=0, retry=10
             retry = retry - 1
             startBlockNum = latestBlockNum + 1
         return False
+
+    @staticmethod
+    def parseProducers(nodeNum):
+        """Parse node config file for producers."""
+
+        configFile=Utils.getNodeConfigDir(nodeNum, "config.ini")
+        if Utils.Debug: Utils.Print("Parsing config file %s" % configFile)
+        configStr=None
+        with open(configFile, 'r') as f:
+            configStr=f.read()
+
+        pattern=r"^\s*producer-name\s*=\W*(\w+)\W*$"
+        producerMatches=re.findall(pattern, configStr, re.MULTILINE)
+        if not producerMatches:
+            if Utils.Debug: Utils.Print("Failed to find producers.")
+            return []
+
+        return producerMatches
+
+    def getProducers(self):
+        return Node.parseProducers(self.nodeId)
\ No newline at end of file
diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py
index 06ae59646a9..9baa3b1a7e5 100755
--- a/tests/nodeos_forked_chain_test.py
+++ b/tests/nodeos_forked_chain_test.py
@@ -207,7 +207,7 @@ def getMinHeadAndLib(prodNodes):
    producers=[]
    for i in range(0, totalNodes):
        node=cluster.getNode(i)
-       node.producers=Cluster.parseProducers(i)
+       node.producers=node.getProducers()
        numProducers=len(node.producers)
        Print("node has producers=%s" % (node.producers))
        if numProducers==0:
diff --git a/tests/nodeos_high_transaction_test.py b/tests/nodeos_high_transaction_test.py
index 6108af9ffa0..8107e973ce5 100755
--- a/tests/nodeos_high_transaction_test.py
+++ b/tests/nodeos_high_transaction_test.py
@@ -116,7 +116,7 @@
    allNodes=cluster.getNodes()
    for i in range(0, totalNodes):
        node=allNodes[i]
-       nodeProducers=Cluster.parseProducers(i)
+       nodeProducers=node.getProducers()
        numProducers=len(nodeProducers)
        Print("node has producers=%s" % (nodeProducers))
        if numProducers==0:
diff --git a/tests/nodeos_short_fork_take_over_test.py b/tests/nodeos_short_fork_take_over_test.py
index 29aa223aee2..f09b860fb74 100755
--- a/tests/nodeos_short_fork_take_over_test.py
+++ b/tests/nodeos_short_fork_take_over_test.py
@@ -170,7 +170,7 @@ def getMinHeadAndLib(prodNodes):
    producers=[]
    for i in range(0, totalNodes):
        node=cluster.getNode(i)
-       node.producers=Cluster.parseProducers(i)
+       node.producers=node.getProducers()
        numProducers=len(node.producers)
        Print("node has producers=%s" % (node.producers))
        if numProducers==0:
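Reviewer note (not part of the patch): parseProducers moves from Cluster.py to Node.py so each node can report its own producers, and the remaining call sites switch to node.getProducers(). The producer-name regex matches config lines such as "producer-name = defproducera"; a minimal standalone check against a hypothetical config fragment:

    import re

    # hypothetical config.ini fragment; the pattern is copied verbatim from parseProducers
    configStr = "plugin = eosio::producer_plugin\nproducer-name = defproducera\nproducer-name = defproducerb\n"
    pattern = r"^\s*producer-name\s*=\W*(\w+)\W*$"
    print(re.findall(pattern, configStr, re.MULTILINE))  # ['defproducera', 'defproducerb']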
diff --git a/tests/nodeos_voting_test.py b/tests/nodeos_voting_test.py
index a3c157e8027..ae951ef096b 100755
--- a/tests/nodeos_voting_test.py
+++ b/tests/nodeos_voting_test.py
@@ -202,7 +202,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds):

    for i in range(0, totalNodes):
        node=cluster.getNode(i)
-       node.producers=Cluster.parseProducers(i)
+       node.producers=node.getProducers()
        for prod in node.producers:
            trans=node.regproducer(cluster.defProducerAccounts[prod], "http::/mysite.com", 0, waitForTransBlock=False, exitOnError=True)

diff --git a/tests/privacy_simple_network.py b/tests/privacy_simple_network.py
index b55b5f96b2c..9a41d9d8178 100755
--- a/tests/privacy_simple_network.py
+++ b/tests/privacy_simple_network.py
@@ -80,7 +80,6 @@
    Print("Validating system accounts after bootstrap")
    cluster.validateAccounts(None)

-   Utils.Print("\n\n\n\n\nCheck after KILL:")
    Utils.Print("\n\n\n\n\nNext Round of Info:")
    cluster.reportInfo()

@@ -88,29 +87,16 @@
    apiNodes = [cluster.getNode(x) for x in range(pnodes, totalNodes)]
    apiNodes.append(cluster.biosNode)

-   blockProducer = producers[0].getHeadOrLib()["producer"]
-
+   feature = "SECURITY_GROUP"
+   Utils.Print("Activating {} Feature".format(feature))
+   producers[0].activateAndVerifyFeatures({feature})
    cluster.verifyInSync()

    featureDict = producers[0].getSupportedProtocolFeatureDict()
    Utils.Print("feature dict: {}".format(json.dumps(featureDict, indent=4, sort_keys=True)))
+   Utils.Print("{} Feature activated".format(feature))

    cluster.reportInfo()

-   Utils.Print("Activating SECURITY_GROUP Feature")
-
-   #Utils.Print("act feature dict: {}".format(json.dumps(producers[0].getActivatedProtocolFeatures(), indent=4, sort_keys=True)))
-   timeout = ( pnodes * 12 / 2 ) * 2 # (number of producers * blocks produced / 0.5 blocks per second) * 2 rounds
-   for producer in producers:
-       producers[0].waitUntilBeginningOfProdTurn(blockProducer, timeout=timeout)
-       feature = "SECURITY_GROUP"
-       producers[0].activateFeatures([feature])
-       if producers[0].containsFeatures([feature]):
-           break
-
-   Utils.Print("SECURITY_GROUP Feature activated")
-   cluster.reportInfo()
-
-   assert producers[0].containsFeatures([feature]), "{} feature was not activated".format(feature)
    def publishContract(account, file, waitForTransBlock=False):
        Print("Publish contract")
@@ -122,20 +108,37 @@ def publishContract(account, file, waitForTransBlock=False):
    participants = [x for x in producers]
    nonParticipants = [x for x in apiNodes]

-   def security_group(nodeNums):
-       action = None
-       for nodeNum in nodeNums:
-           if action is None:
-               action = '[['
-           else:
-               action += ','
-           action += '"{}"'.format(Node.participantName(nodeNum))
-       action += ']]'
-
-       Utils.Print("adding {} to the security group".format(action))
-       trans = producers[0].pushMessage(cluster.eosioAccount.name, "add", action, "--permission eosio@active")
-       Utils.Print("add trans: {}".format(json.dumps(trans, indent=4, sort_keys=True)))
-       trans = producers[0].pushMessage(cluster.eosioAccount.name, "publish", "[0]", "--permission eosio@active")
+   # this value limits how many add/remove table entries are processed per publish; incrementing it here also keeps the repeated publish transactions from being duplicates
+   publishProcessNum = 20
+   def security_group(addNodeNums=[], removeNodeNums=[]):
+       def createAction(nodeNums):
+           action = None
+           for nodeNum in nodeNums:
+               if action is None:
+                   action = '[['
+               else:
+                   action += ','
+               action += '"{}"'.format(Node.participantName(nodeNum))
+           if action:
+               action += ']]'
+           return action
+
+       addAction = createAction(addNodeNums)
+       removeAction = createAction(removeNodeNums)
+
+       if addAction:
+           Utils.Print("adding {} to the security group".format(addAction))
+           trans = producers[0].pushMessage(cluster.eosioAccount.name, "add", addAction, "--permission eosio@active")
+           Utils.Print("add trans: {}".format(json.dumps(trans, indent=4, sort_keys=True)))
+
+       if removeAction:
+           Utils.Print("removing {} from the security group".format(removeAction))
+           trans = producers[0].pushMessage(cluster.eosioAccount.name, "remove", removeAction, "--permission eosio@active")
+           Utils.Print("remove trans: {}".format(json.dumps(trans, indent=4, sort_keys=True)))
+
+       global publishProcessNum
+       publishProcessNum += 1
+       trans = producers[0].pushMessage(cluster.eosioAccount.name, "publish", "[{}]".format(publishProcessNum), "--permission eosio@active")
        Utils.Print("publish action trans: {}".format(json.dumps(trans, indent=4, sort_keys=True)))
        return trans

@@ -160,33 +163,123 @@ def verifyNonParticipants(transId):
            nonParticipantHead = nonParticipant.getBlockNum()
            assert nonParticipantHead < producerHead, "Participants (that are not producers themselves) should not advance head to {}, but it has advanced to {}".format(producerHead, nonParticipantHead)

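+   # the verification contract after every security group change: each participant
+   # must see the publish transaction become irreversible, while non-participants no
+   # longer receive blocks, so their heads must stay behind the producers' head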
+   def verifySecurityGroup(publishTransPair):
+       publishTransId = Node.getTransId(publishTransPair[1])
+       verifyParticipantsTransactionFinalized(publishTransId)
+       verifyNonParticipants(publishTransId)
+
+   def moveToParticipants():
+       movedNode = nonParticipants[0]
+       participants.append(movedNode)
+       del nonParticipants[0]
+       return movedNode
+
+   def moveToNonParticipants():
+       movedNode = participants[-1]
+       # pop off the back of participants and push onto the front of nonParticipants
+       nonParticipants.insert(0, movedNode)
+       del participants[-1]
+       return movedNode
+
+   def addToSg():
+       node = moveToParticipants()
+       Utils.Print("Take a non-participant and make a participant. Now there are {} participants and {} non-participants".format(len(participants), len(nonParticipants)))
+       toAddNum = cluster.getParticipantNum(node)
+       return security_group([toAddNum])
+
+   def remFromSg():
+       node = moveToNonParticipants()
+       Utils.Print("Take a participant and make a non-participant. Now there are {} participants and {} non-participants".format(len(participants), len(nonParticipants)))
+       toRemoveNum = cluster.getParticipantNum(node)
+       return security_group(removeNodeNums=[toRemoveNum])
+
    Utils.Print("Add all producers to security group")
    publishTrans = security_group([x for x in range(pnodes)])
-   publishTransId = Node.getTransId(publishTrans[1])
-   verifyParticipantsTransactionFinalized(publishTransId)
-   verifyNonParticipants(publishTransId)
+   verifySecurityGroup(publishTrans)
+
+   cluster.reportInfo()
+
+   # one by one add each nonParticipant to the security group
    while len(nonParticipants) > 0:
-       toAdd = nonParticipants[0]
-       participants.append(toAdd)
-       del nonParticipants[0]
-       Utils.Print("Take a non-participant and make a participant. Now there are {} participants and {} non-participants".format(len(participants), len(nonParticipants)))
+       publishTrans = addToSg()
+       verifySecurityGroup(publishTrans)
+       cluster.reportInfo()

-       toAddNum = None
-       num = 0
-       for node in cluster.getNodes():
-           if node == toAdd:
-               toAddNum = num
-               break
-           num += 1
-       if toAddNum is None:
-           assert toAdd == cluster.biosNode
-           toAddNum = totalNodes
-       publishTrans = security_group([toAddNum])
-       publishTransId = Node.getTransId(publishTrans[1])
-       verifyParticipantsTransactionFinalized(publishTransId)
-       verifyNonParticipants(publishTransId)
+   # one by one remove each (original) nonParticipant from the security group
+   while len(participants) > pnodes:
+       publishTrans = remFromSg()
+       verifySecurityGroup(publishTrans)
+       cluster.reportInfo()
+
+
+   # if we have more than 1 api node, add and remove all of those nodes in bulk; if not, this is just a repeat of the test above
+   if len(apiNodes) > 1:
+       # add all the api nodes to the security group at once
+       toAdd = []
+       for apiNode in nonParticipants:
+           participantNum = cluster.getParticipantNum(apiNode)
+           toAdd.append(participantNum)
+       participants.extend(nonParticipants)
+       nonParticipants = []
+
+       Utils.Print("Add all api nodes to security group")
+       publishTrans = security_group(addNodeNums=toAdd)
+       verifySecurityGroup(publishTrans)
+
+       cluster.reportInfo()
+
+
+       # alternate removing/adding participants so the security group changes span a range of blocks
+       initialBlockNum = None
+       blockNum = None
+       def is_done():
+           # ensure we can identify the range of blocks (LIBs) over which the security group changed
+           return blockNum - initialBlockNum > 12
+
+       done = False
+       # keep adding and removing nodes until we are done
+       while not done:
+           if blockNum:
+               participants[0].waitForNextBlock()
+
+           while not done and len(participants) > pnodes:
+               publishTrans = remFromSg()
+               Utils.Print("publishTrans: {}".format(json.dumps(publishTrans, indent=2)))
+               blockNum = Node.getTransBlockNum(publishTrans[1])
+               if initialBlockNum is None:
+                   initialBlockNum = blockNum
+                   lastBlockNum = blockNum
+               done = is_done()
+
+           while not done and len(nonParticipants) > 0:
+               publishTrans = addToSg()
+               blockNum = Node.getTransBlockNum(publishTrans[1])
+               done = is_done()
+
+       Utils.Print("First adjustment to security group was in block num: {}, verifying no changes until block num: {} is finalized".format(initialBlockNum, blockNum))
+       verifySecurityGroup(publishTrans)
+
+       cluster.reportInfo()
+
+       # remove all the api nodes from the security group at once
+       toRemove = []
+       # index pnodes and following are moving to nonParticipants, so participants has everything before that
+       nonParticipants = participants[pnodes:]
+       participants = participants[:pnodes]
+       for apiNode in nonParticipants:
+           participantNum = cluster.getParticipantNum(apiNode)
+           toRemove.append(participantNum)
+
+       Utils.Print("Remove all api nodes from security group")
+       publishTrans = security_group(removeNodeNums=toRemove)
+       verifySecurityGroup(publishTrans)
+
+       cluster.reportInfo()

    testSuccessful=True
finally:
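Reviewer note (not part of the patch): security_group() builds a nested-list action payload and skips the add or remove push entirely when createAction returns None. A minimal standalone sketch of that contract, using a hypothetical participant_name stand-in for Node.participantName (whose real mapping lives in Node.py):

    def participant_name(num):
        # hypothetical stand-in for Node.participantName; mapping assumed for illustration only
        return "node{}".format(num + 1)

    def create_action(node_nums):
        # mirrors createAction in the diff: None for an empty list, otherwise a
        # nested-list literal such as [["node1","node3"]]
        if not node_nums:
            return None
        return '[[{}]]'.format(','.join('"{}"'.format(participant_name(n)) for n in node_nums))

    assert create_action([]) is None
    assert create_action([0, 2]) == '[["node1","node3"]]'

The None-for-empty convention is what lets a single helper serve the add-only, remove-only, and combined add/remove cases without sending no-op actions.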