From 3962f71dc048d99df793d78356081835b5bd1674 Mon Sep 17 00:00:00 2001
From: Latitia M Haskins
Date: Thu, 27 Oct 2016 09:33:45 -0400
Subject: [PATCH] FAB-575: New tests (& remote execution capability)

First pass: adding new tests.

This patchset contains the code and tests executed for the v0.6 release
of fabric. It includes the changes made for remote execution of
specified behave tests in different environments, as well as additional
membersrvc users for testing a larger network.

Note that the changes update the directory structure to accommodate
additional test files. The Makefile also needs to be updated to execute
the behave tests in their new location. This structure will change in
future versions of fabric.

Change-Id: I3a0fe01d989afadd11366748ecf285335c99c8cc
Signed-off-by: Latitia M Haskins
---
 .../docker-compose-16-consensus-base.yml      |  26 +
 .../docker-compose-16-consensus.yml           | 212 ++++++
 ...compose-4-consensus-newpeers_w_upgrade.yml |  66 ++
 .../docker-compose-4-consensus-upgrade.yml    |  91 +++
 bddtests/consensus.feature                    | 642 ++++++++++++++++++
 bddtests/environment.py                       | 165 +++--
 bddtests/peer_basic.feature                   | 119 ++--
 bddtests/steps/bdd_compose_util.py            |   8 +-
 bddtests/steps/bdd_remote_util.py             |  83 +++
 bddtests/steps/bdd_request_util.py            |  90 ++-
 bddtests/steps/consensus.py                   |  69 ++
 bddtests/steps/peer_basic_impl.py             |  68 +-
 membersrvc/membersrvc.yaml                    | 110 +++
 13 files changed, 1631 insertions(+), 118 deletions(-)
 create mode 100644 bddtests/bdd-docker/docker-compose-16-consensus-base.yml
 create mode 100644 bddtests/bdd-docker/docker-compose-16-consensus.yml
 create mode 100644 bddtests/bdd-docker/docker-compose-4-consensus-newpeers_w_upgrade.yml
 create mode 100644 bddtests/bdd-docker/docker-compose-4-consensus-upgrade.yml
 create mode 100644 bddtests/consensus.feature
 create mode 100644 bddtests/steps/bdd_remote_util.py
 create mode 100644 bddtests/steps/consensus.py

diff --git a/bddtests/bdd-docker/docker-compose-16-consensus-base.yml b/bddtests/bdd-docker/docker-compose-16-consensus-base.yml
new file mode 100644
index 00000000000..c8c0d294651
--- /dev/null
+++ b/bddtests/bdd-docker/docker-compose-16-consensus-base.yml
@@ -0,0 +1,26 @@
+vpBase:
+  extends:
+    file: compose-defaults.yml
+    service: vp
+  environment:
+    - CORE_SECURITY_ENABLED=true
+    - CORE_PEER_PKI_ECA_PADDR=membersrvc0:7054
+    - CORE_PEER_PKI_TCA_PADDR=membersrvc0:7054
+    - CORE_PEER_PKI_TLSCA_PADDR=membersrvc0:7054
+    - CORE_PEER_PKI_TLS_ROOTCERT_FILE=./bddtests/tlsca.cert
+    # TODO: Currently required due to issue reading obcca configuration location
+    - CORE_PBFT_GENERAL_N=16
+    # You must set this or consensus will not halt as expected when N is greater than 4
+    - CORE_PBFT_GENERAL_F=5
+    # The checkpoint interval in sequence numbers
+    - CORE_PBFT_GENERAL_K=2
+
+vpBatch:
+  extends:
+    service: vpBase
+  environment:
+    - CORE_PEER_VALIDATOR_CONSENSUS_PLUGIN=pbft
+    - CORE_PBFT_GENERAL_TIMEOUT_REQUEST=10s
+    - CORE_PBFT_GENERAL_MODE=batch
+    # TODO: This is used for testing as to assure deployment goes through to block
+    - CORE_PBFT_GENERAL_BATCHSIZE=1
diff --git a/bddtests/bdd-docker/docker-compose-16-consensus.yml b/bddtests/bdd-docker/docker-compose-16-consensus.yml
new file mode 100644
index 00000000000..92d5b500791
--- /dev/null
+++ b/bddtests/bdd-docker/docker-compose-16-consensus.yml
@@ -0,0 +1,212 @@
+membersrvc0:
+  extends:
+    file: compose-defaults.yml
+    service: membersrvc
+
+vp0:
+  extends:
+    file: docker-compose-16-consensus-base.yml
+    service: vpBatch
+  environment:
+    - CORE_PEER_ID=vp0
+    -
CORE_SECURITY_ENROLLID=test_vp0 + - CORE_SECURITY_ENROLLSECRET=MwYpmSRjupbT + links: + - membersrvc0 + ports: + - 7050:6060 + +vp1: + extends: + file: docker-compose-16-consensus-base.yml + service: vpBatch + environment: + - CORE_PEER_ID=vp1 + - CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - CORE_SECURITY_ENROLLID=test_vp1 + - CORE_SECURITY_ENROLLSECRET=5wgHK9qqYaPy + links: + - membersrvc0 + - vp0 + +vp2: + extends: + file: docker-compose-16-consensus-base.yml + service: vpBatch + environment: + - CORE_PEER_ID=vp2 + - CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - CORE_SECURITY_ENROLLID=test_vp2 + - CORE_SECURITY_ENROLLSECRET=vQelbRvja7cJ + links: + - membersrvc0 + - vp0 + +vp3: + extends: + file: docker-compose-16-consensus-base.yml + service: vpBatch + environment: + - CORE_PEER_ID=vp3 + - CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - CORE_SECURITY_ENROLLID=test_vp3 + - CORE_SECURITY_ENROLLSECRET=9LKqKH5peurL + links: + - membersrvc0 + - vp0 + +vp4: + extends: + file: docker-compose-16-consensus-base.yml + service: vpBatch + environment: + - CORE_PEER_ID=vp4 + - CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - CORE_SECURITY_ENROLLID=test_vp4 + - CORE_SECURITY_ENROLLSECRET=Pqh90CEW5juZ + links: + - membersrvc0 + - vp0 + +vp5: + extends: + file: docker-compose-16-consensus-base.yml + service: vpBatch + environment: + - CORE_PEER_ID=vp5 + - CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - CORE_SECURITY_ENROLLID=test_vp5 + - CORE_SECURITY_ENROLLSECRET=FfdvDkAdY81P + links: + - membersrvc0 + - vp0 + +vp6: + extends: + file: docker-compose-16-consensus-base.yml + service: vpBatch + environment: + - CORE_PEER_ID=vp6 + - CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - CORE_SECURITY_ENROLLID=test_vp6 + - CORE_SECURITY_ENROLLSECRET=QiXJgHyV4t7A + links: + - membersrvc0 + - vp0 + +vp7: + extends: + file: docker-compose-16-consensus-base.yml + service: vpBatch + environment: + - CORE_PEER_ID=vp7 + - CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - CORE_SECURITY_ENROLLID=test_vp7 + - CORE_SECURITY_ENROLLSECRET=twoKZouEyLyB + links: + - membersrvc0 + - vp0 + +vp8: + extends: + file: docker-compose-16-consensus-base.yml + service: vpBatch + environment: + - CORE_PEER_ID=vp8 + - CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - CORE_SECURITY_ENROLLID=test_vp8 + - CORE_SECURITY_ENROLLSECRET=BxP7QNh778gI + links: + - membersrvc0 + - vp0 + +vp9: + extends: + file: docker-compose-16-consensus-base.yml + service: vpBatch + environment: + - CORE_PEER_ID=vp9 + - CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - CORE_SECURITY_ENROLLID=test_vp9 + - CORE_SECURITY_ENROLLSECRET=wu3F1EwJWHvQ + links: + - membersrvc0 + - vp0 + +vp10: + extends: + file: docker-compose-16-consensus-base.yml + service: vpBatch + environment: + - CORE_PEER_ID=vp10 + - CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - CORE_SECURITY_ENROLLID=test_vp10 + - CORE_SECURITY_ENROLLSECRET=hNeS24SKJtMD + links: + - membersrvc0 + - vp0 + +vp11: + extends: + file: docker-compose-16-consensus-base.yml + service: vpBatch + environment: + - CORE_PEER_ID=vp11 + - CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - CORE_SECURITY_ENROLLID=test_vp11 + - CORE_SECURITY_ENROLLSECRET=ezTbMAUccdLy + links: + - membersrvc0 + - vp0 + +vp12: + extends: + file: docker-compose-16-consensus-base.yml + service: vpBatch + environment: + - CORE_PEER_ID=vp12 + - CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - CORE_SECURITY_ENROLLID=test_vp12 + - CORE_SECURITY_ENROLLSECRET=MSDr2juOIooZ + links: + - membersrvc0 + - vp0 + +vp13: + extends: + file: docker-compose-16-consensus-base.yml + service: vpBatch + environment: + - CORE_PEER_ID=vp13 + - 
CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - CORE_SECURITY_ENROLLID=test_vp13 + - CORE_SECURITY_ENROLLSECRET=DfPHFoFKj2jl + links: + - membersrvc0 + - vp0 + +vp14: + extends: + file: docker-compose-16-consensus-base.yml + service: vpBatch + environment: + - CORE_PEER_ID=vp14 + - CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - CORE_SECURITY_ENROLLID=test_vp14 + - CORE_SECURITY_ENROLLSECRET=NyxEfwjy7vPL + links: + - membersrvc0 + - vp0 + +vp15: + extends: + file: docker-compose-16-consensus-base.yml + service: vpBatch + environment: + - CORE_PEER_ID=vp15 + - CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - CORE_SECURITY_ENROLLID=test_vp15 + - CORE_SECURITY_ENROLLSECRET=sTHJYI3ndQH+ + links: + - membersrvc0 + - vp0 diff --git a/bddtests/bdd-docker/docker-compose-4-consensus-newpeers_w_upgrade.yml b/bddtests/bdd-docker/docker-compose-4-consensus-newpeers_w_upgrade.yml new file mode 100644 index 00000000000..7723f6b0db6 --- /dev/null +++ b/bddtests/bdd-docker/docker-compose-4-consensus-newpeers_w_upgrade.yml @@ -0,0 +1,66 @@ +membersrvc0: + extends: + file: compose-defaults.yml + service: membersrvc + volumes_from: + - bdddocker_dbstore_membersrvc0_1 + +vp0: + extends: + file: docker-compose-4-consensus-base.yml + service: vpBase + volumes_from: + - bdddocker_dbstore_vp0_1 + environment: + - CORE_PEER_ID=vp0 + - CORE_SECURITY_ENROLLID=test_vp0 + - CORE_SECURITY_ENROLLSECRET=MwYpmSRjupbT + links: + - membersrvc0 + ports: + - 7050:6060 + +vp1: + extends: + file: docker-compose-4-consensus-base.yml + service: vpBase + volumes_from: + - bdddocker_dbstore_vp1_1 + environment: + - CORE_PEER_ID=vp1 + - CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - CORE_SECURITY_ENROLLID=test_vp1 + - CORE_SECURITY_ENROLLSECRET=5wgHK9qqYaPy + links: + - membersrvc0 + - vp0 + +vp2: + extends: + file: docker-compose-4-consensus-base.yml + service: vpBase + volumes_from: + - bdddocker_dbstore_vp2_1 + environment: + - CORE_PEER_ID=vp2 + - CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - CORE_SECURITY_ENROLLID=test_vp2 + - CORE_SECURITY_ENROLLSECRET=vQelbRvja7cJ + links: + - membersrvc0 + - vp0 + +vp3: + extends: + file: docker-compose-4-consensus-base.yml + service: vpBase + volumes_from: + - bdddocker_dbstore_vp3_1 + environment: + - CORE_PEER_ID=vp3 + - CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - CORE_SECURITY_ENROLLID=test_vp3 + - CORE_SECURITY_ENROLLSECRET=9LKqKH5peurL + links: + - membersrvc0 + - vp0 diff --git a/bddtests/bdd-docker/docker-compose-4-consensus-upgrade.yml b/bddtests/bdd-docker/docker-compose-4-consensus-upgrade.yml new file mode 100644 index 00000000000..aaf65b3b016 --- /dev/null +++ b/bddtests/bdd-docker/docker-compose-4-consensus-upgrade.yml @@ -0,0 +1,91 @@ +dbstore_membersrvc0: + image: hyperledger/fabric-membersrvc + volumes: + - /var/hyperledger/production/db + +membersrvc0: + extends: + file: compose-defaults.yml + service: membersrvc + volumes_from: + - dbstore_membersrvc0 + +dbstore_vp0: + image: hyperledger/fabric-peer + volumes: + - /var/hyperledger/production/db + +vp0: + extends: + file: docker-compose-4-consensus-base.yml + service: vpBase + volumes_from: + - dbstore_vp0 + environment: + - CORE_PEER_ID=vp0 + - CORE_SECURITY_ENROLLID=test_vp0 + - CORE_SECURITY_ENROLLSECRET=MwYpmSRjupbT + links: + - membersrvc0 + ports: + - 7050:6060 + +dbstore_vp1: + image: hyperledger/fabric-peer + volumes: + - /var/hyperledger/production/db + +vp1: + extends: + file: docker-compose-4-consensus-base.yml + service: vpBase + volumes_from: + - dbstore_vp1 + environment: + - CORE_PEER_ID=vp1 + - CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - 
CORE_SECURITY_ENROLLID=test_vp1 + - CORE_SECURITY_ENROLLSECRET=5wgHK9qqYaPy + links: + - membersrvc0 + - vp0 + +dbstore_vp2: + image: hyperledger/fabric-peer + volumes: + - /var/hyperledger/production/db + +vp2: + extends: + file: docker-compose-4-consensus-base.yml + service: vpBase + volumes_from: + - dbstore_vp2 + environment: + - CORE_PEER_ID=vp2 + - CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - CORE_SECURITY_ENROLLID=test_vp2 + - CORE_SECURITY_ENROLLSECRET=vQelbRvja7cJ + links: + - membersrvc0 + - vp0 + +dbstore_vp3: + image: hyperledger/fabric-peer + volumes: + - /var/hyperledger/production/db + +vp3: + extends: + file: docker-compose-4-consensus-base.yml + service: vpBase + volumes_from: + - dbstore_vp3 + environment: + - CORE_PEER_ID=vp3 + - CORE_PEER_DISCOVERY_ROOTNODE=vp0:7051 + - CORE_SECURITY_ENROLLID=test_vp3 + - CORE_SECURITY_ENROLLSECRET=9LKqKH5peurL + links: + - membersrvc0 + - vp0 diff --git a/bddtests/consensus.feature b/bddtests/consensus.feature new file mode 100644 index 00000000000..b34c767ba6f --- /dev/null +++ b/bddtests/consensus.feature @@ -0,0 +1,642 @@ +# +# Test Consensus in Fabric Peers +# +# Tags that can be used and will affect test internals: +# @doNotDecompose will NOT decompose the named compose_yaml after scenario ends. Useful for setting up environment and reviewing after scenario. +# @chaincodeImagesUpToDate use this if all scenarios chaincode images are up to date, and do NOT require building. BE SURE!!! + +Feature: Consensus between peers + As a Fabric developer + I want to run a network of peers + +@scat + Scenario: chaincode example02 with 4 peers and 1 membersrvc, multiple peers stopped + Given we compose "docker-compose-4-consensus-batch.yml" + And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers: + | vp0 | + And I use the following credentials for querying peers: + | peer | username | secret | + | vp0 | test_user0 | MS9qrN8hFjlE | + | vp1 | test_user1 | jGlNl6ImkuDo | + | vp2 | test_user2 | zMflqOKezFiA | + | vp3 | test_user3 | vWdLCE00vJy0 | + + When requesting "/chain" from "vp0" + Then I should get a JSON response with "height" = "1" + + # Deploy + When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0" + | arg1 | arg2 | arg3 | arg4 | + | a | 200 | b | 300 | + Then I should have received a chaincode name + Then I wait up to "60" seconds for transaction to be committed to peers: + | vp1 | vp2 | vp3 | + + # Build up a sizable blockchain, that vp3 will need to validate at startup + When I invoke chaincode "example2" function name "invoke" on "vp0" "30" times + |arg1|arg2|arg3| + | b | a | 1 | + Then I should have received a transactionID + Then I wait up to "120" seconds for transaction to be committed to peers: + | vp1 | vp2 | vp3 | + + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp1 | vp2 | vp3 | + Then I should get a JSON response from peers with "result.message" = "230" + | vp0 | vp1 | vp2 | vp3 | + + Given I stop peers: + | vp3 | + + # Invoke a transaction to get vp3 out of sync + When I invoke chaincode "example2" function name "invoke" on "vp0" + |arg1|arg2|arg3| + | a | b | 10 | + Then I should have received a transactionID + Then I wait up to "120" seconds for transaction to be committed to peers: + | vp1 | vp2 | + + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp1 | vp2 | + Then I should get a JSON response from peers with "result.message" = "220" + 
| vp0 | vp1 | vp2 | + + Given I stop peers: + | vp2 | + + # Invoke a transaction to get vp2 out of sync + When I invoke chaincode "example2" function name "invoke" on "vp0" + |arg1|arg2|arg3| + | a | b | 10 | + Then I should have received a transactionID +# Then I wait up to "120" seconds for transaction to be committed to peers that fail: +# | vp1 | + + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp1 | + # Should keep the same value as before + Then I should get a JSON response from peers with "result.message" = "220" + | vp0 | vp1 | + + # Now start vp2 again + Given I start peers: + | vp2 | + And I wait "15" seconds + + And I wait "60" seconds + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp1 | vp2 | + Then I should get a JSON response from peers with "result.message" = "210" + | vp0 | vp1 | vp2 | + + # Invoke 6 more txs, this will trigger a state transfer, set a target, and execute new outstanding transactions + When I invoke chaincode "example2" function name "invoke" on "vp0" "10" times + |arg1|arg2|arg3| + | a | b | 10 | + Then I should have received a transactionID + Then I wait up to "60" seconds for transaction to be committed to peers: + | vp1 | vp2 | + # wait a bit longer and let state transfer finish + Then I wait "60" seconds + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp1 | vp2 | + Then I should get a JSON response from peers with "result.message" = "110" + | vp0 | vp1 | vp2 | + + # Now start vp3 again + Given I start peers: + | vp3 | + And I wait "15" seconds + + # Invoke 10 more txs, this will trigger a state transfer, set a target, and execute new outstanding transactions + When I invoke chaincode "example2" function name "invoke" on "vp0" "10" times + |arg1|arg2|arg3| + | a | b | 10 | + Then I should have received a transactionID + Then I wait "180" seconds + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp1 | vp2 | + Then I should get a JSON response from peers with "result.message" = "10" + | vp0 | vp1 | vp2 | + + Given I stop peers: + | vp1 | + + # Invoke a transaction to get vp0 out of sync + When I invoke chaincode "example2" function name "invoke" on "vp0" + |arg1|arg2|arg3| + | a | b | 10 | + Then I should have received a transactionID + Then I wait up to "120" seconds for transaction to be committed to peers: + | vp2 | vp3 | + + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp2 | vp3 | + Then I should get a JSON response from peers with "result.message" = "0" + | vp0 | vp2 | vp3 | + + +#@scat +#@doNotDecompose +# Scenario: chaincode example02 with 4 peers and 1 membersrvc, upgrade peer code with same DB +# Given we compose "docker-compose-4-consensus-upgrade.yml" +# And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers: +# | vp0 | +# And I use the following credentials for querying peers: +# | peer | username | secret | +# | vp0 | test_user0 | MS9qrN8hFjlE | +# | vp1 | test_user1 | jGlNl6ImkuDo | +# | vp2 | test_user2 | zMflqOKezFiA | +# | vp3 | test_user3 | vWdLCE00vJy0 | +# +# When requesting "/chain" from "vp0" +# Then I should get a JSON response with "height" = "1" +# +# # Deploy +# When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0" +# | arg1 | arg2 | arg3 | arg4 | +# | a | 200 | b | 300 | +# Then I should have received a 
chaincode name +# Then I wait up to "120" seconds for transaction to be committed to peers: +# | vp1 | vp2 | vp3 | +# +# # Build up a sizable blockchain, that vp3 will need to validate at startup +# When I invoke chaincode "example2" function name "invoke" on "vp0" "30" times +# |arg1|arg2|arg3| +# | b | a | 1 | +# Then I should have received a transactionID +# Then I wait up to "120" seconds for transaction to be committed to peers: +# | vp1 | vp2 | vp3 | +# +# When I query chaincode "example2" function name "query" with value "a" on peers: +# | vp0 | vp1 | vp2 | vp3 | +# Then I should get a JSON response from peers with "result.message" = "230" +# | vp0 | vp1 | vp2 | vp3 | +# And I wait "5" seconds +# +# When requesting "/chain" from "vp0" +# Then I should "store" the "height" from the JSON response +# +# Given I build new images +# And I fallback using the following credentials +# | username | secret | +# | test_vp0 | MwYpmSRjupbT | +# | test_vp1 | 5wgHK9qqYaPy | +# | test_vp2 | vQelbRvja7cJ | +# | test_vp3 | 9LKqKH5peurL | +# And I wait "60" seconds +# +# When requesting "/chain" from "vp0" +# Then I should get a JSON response with "height" = "previous" +# +# Given I use the following credentials for querying peers: +# | peer | username | secret | +# | vp0 | test_user0 | MS9qrN8hFjlE | +# | vp1 | test_user1 | jGlNl6ImkuDo | +# | vp2 | test_user2 | zMflqOKezFiA | +# | vp3 | test_user3 | vWdLCE00vJy0 | +# +# When I query chaincode "example2" function name "query" with value "a" on peers: +# | vp0 | vp1 | vp2 | +# Then I should get a JSON response from peers with "result.message" = "230" +# | vp0 | vp1 | vp2 | +# And I wait "60" seconds +# +# When I invoke chaincode "example2" function name "invoke" on "vp0" "30" times +# |arg1|arg2|arg3| +# | b | a | 1 | +# Then I should have received a transactionID +# Then I wait up to "120" seconds for transaction to be committed to peers: +# | vp1 | vp2 | vp3 | +# +# When I query chaincode "example2" function name "query" with value "a" on peers: +# | vp0 | vp1 | vp2 | vp3 | +# Then I should get a JSON response from peers with "result.message" = "260" +# And I wait "5" seconds +# +# When requesting "/chain" from "vp0" +# Then I should "store" the "height" from the JSON response +# +# Given I upgrade using the following credentials +# | username | secret | +# | test_vp0 | MwYpmSRjupbT | +# | test_vp1 | 5wgHK9qqYaPy | +# | test_vp2 | vQelbRvja7cJ | +# | test_vp3 | 9LKqKH5peurL | +# And I wait "60" seconds +# +# When I query chaincode "example2" function name "query" with value "a" on peers: +# | vp0 | vp1 | vp2 | +# Then I should get a JSON response from peers with "result.message" = "260" +# | vp0 | vp1 | vp2 | +# +# When requesting "/chain" from "vp0" +# Then I should get a JSON response with "height" = "previous" +# +# When I invoke chaincode "example2" function name "invoke" on "vp0" "30" times +# |arg1|arg2|arg3| +# | b | a | 1 | +# Then I should have received a transactionID +# Then I wait up to "120" seconds for transaction to be committed to peers: +# | vp1 | vp2 | vp3 | +# +# When I query chaincode "example2" function name "query" with value "a" on peers: +# | vp0 | vp1 | vp2 | vp3 | +# Then I should get a JSON response from peers with "result.message" = "290" + + +@scat + Scenario: chaincode example02 with 4 peers and 1 membersrvc, stop vp1 + + Given we compose "docker-compose-4-consensus-batch.yml" + And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers: + | vp0 | + And I use the following credentials for 
querying peers: + | peer | username | secret | + | vp0 | test_user0 | MS9qrN8hFjlE | + | vp1 | test_user1 | jGlNl6ImkuDo | + | vp2 | test_user2 | zMflqOKezFiA | + | vp3 | test_user3 | vWdLCE00vJy0 | + + When requesting "/chain" from "vp0" + Then I should get a JSON response with "height" = "1" + + # Deploy + When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0" + | arg1 | arg2 | arg3 | arg4 | + | a | 100 | b | 200 | + Then I should have received a chaincode name + Then I wait up to "60" seconds for transaction to be committed to peers: + | vp1 | vp2 | vp3 | + + Given I stop peers: + | vp1 | + + # Execute one request to get vp1 out of sync + When I invoke chaincode "example2" function name "invoke" on "vp0" + |arg1|arg2|arg3| + | b | a | 1 | + Then I should have received a transactionID + Then I wait up to "60" seconds for transaction to be committed to peers: + | vp2 | vp3 | + + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp2 | vp3 | + Then I should get a JSON response from peers with "result.message" = "101" + | vp0 | vp2 | vp3 | + + # Now start vp1 again + Given I start peers: + | vp1 | + + # Invoke some more txs, this will trigger a state transfer, but it cannot complete + When I invoke chaincode "example2" function name "invoke" on "vp0" "8" times + |arg1|arg2|arg3| + | a | b | 10 | + Then I should have received a transactionID + Then I wait up to "60" seconds for transaction to be committed to peers: + | vp2 | vp3 | + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp2 | vp3 | + Then I should get a JSON response from peers with "result.message" = "21" + + # Force vp1 to attempt to sync with the rest of the peers + When I invoke chaincode "example2" function name "invoke" on "vp1" + |arg1|arg2|arg3| + | a | b | 10 | + And I unconditionally query chaincode "example2" function name "query" with value "a" on peers: + | vp1 | + Then I should get a JSON response from peers with "error.data" = "Error when querying chaincode: Error: state may be inconsistent, cannot query" + | vp1 | + + +@scat + Scenario: Peers catch up only when necessary + Given we compose "docker-compose-4-consensus-upgrade.yml" + And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers: + | vp0 | + And I use the following credentials for querying peers: + | peer | username | secret | + | vp0 | test_user0 | MS9qrN8hFjlE | + | vp1 | test_user1 | jGlNl6ImkuDo | + | vp2 | test_user2 | zMflqOKezFiA | + | vp3 | test_user3 | vWdLCE00vJy0 | + + When requesting "/network/peers" from "vp0" + Then I should get a JSON response with array "peers" contains "4" elements + + # Deploy + When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0" + | arg1 | arg2 | arg3 | arg4 | + | a | 200 | b | 300 | + Then I should have received a chaincode name + Then I wait up to "240" seconds for transaction to be committed to peers: + | vp0 | vp1 | vp2 | vp3 | + + # Build up a sizable blockchain, that vp3 will need to validate at startup + When I invoke chaincode "example2" function name "invoke" on "vp0" "30" times + |arg1|arg2|arg3| + | b | a | 1 | + Then I should have received a transactionID + Then I wait up to "240" seconds for transaction to be committed to peers: + | vp0 | vp1 | vp2 | vp3 | + + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp1 | vp2 | 
vp3 | + Then I should get a JSON response from peers with "result.message" = "230" + | vp0 | vp1 | vp2 | vp3 | + And I wait "5" seconds + + Given I stop peers: + | vp1 | + + # Invoke a transaction to get vp1 out of sync + When I invoke chaincode "example2" function name "invoke" on "vp0" "1" times + |arg1|arg2|arg3| + | a | b | 10 | + Then I should have received a transactionID + Then I wait up to "240" seconds for transaction to be committed to peers: + | vp0 | vp2 | vp3 | + + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp2 | vp3 | + Then I should get a JSON response from peers with "result.message" = "220" + | vp0 | vp2 | vp3 | + + When requesting "/chain" from "vp0" + Then I should "store" the "height" from the JSON response + When requesting "/chain" from "vp2" + Then I should get a JSON response with "height" = "previous" + When requesting "/chain" from "vp3" + Then I should get a JSON response with "height" = "previous" + + Given I start peers: + | vp1 | + And I wait "30" seconds + + When I invoke chaincode "example2" function name "invoke" on "vp0" "10" times + |arg1|arg2|arg3| + | a | b | 1 | + Then I should have received a transactionID + Then I wait up to "240" seconds for transaction to be committed to peers: + | vp0 | vp2 | vp3 | + And I wait "60" seconds + + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp2 | vp3 | + Then I should get a JSON response from peers with "result.message" = "210" + | vp0 | vp2 | vp3 | + + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp1 | + Then I should get a JSON response from peers with "result.message" = "220" + | vp1 | + + +#@doNotDecompose + Scenario: 16 peer network - basic consensus + Given we compose "docker-compose-16-consensus.yml" + And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers: + | vp0 | + And I use the following credentials for querying peers: + | peer | username | secret | + | vp0 | test_user0 | MS9qrN8hFjlE | + | vp1 | test_user1 | jGlNl6ImkuDo | + | vp2 | test_user2 | zMflqOKezFiA | + | vp3 | test_user3 | vWdLCE00vJy0 | + | vp4 | test_user4 | 4nXSrfoYGFCP | + | vp5 | test_user5 | yg5DVhm0er1z | + | vp6 | test_user6 | b7pmSxzKNFiw | + | vp7 | test_user7 | YsWZD4qQmYxo | + | vp8 | test_user8 | W8G0usrU7jRk | + | vp9 | test_user9 | H80SiB5ODKKQ | + | vp10 | test_user10 | n21Dq435t9S1 | + | vp11 | test_user11 | 6S0UjokSRHYh | + | vp12 | test_user12 | dpodq6r2+NPu | + | vp13 | test_user13 | 9XZFoBjXJ5zM | + | vp14 | test_user14 | 6lOOiQXW5uXM | + | vp15 | test_user15 | PTyW9AVbBSjk | + + When requesting "/network/peers" from "vp0" + Then I should get a JSON response with array "peers" contains "16" elements + + # Deploy + When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0" + | arg1 | arg2 | arg3 | arg4 | + | a | 200 | b | 300 | + Then I should have received a chaincode name + Then I wait up to "240" seconds for transaction to be committed to peers: + | vp1 | vp2 | vp3 | vp4 | vp5 | vp6 | vp7 | vp8 | vp9 | vp10 | vp11 | vp12 | vp13 | vp14 | vp15 | + + When I invoke chaincode "example2" function name "invoke" on "vp0" "10" times + |arg1|arg2|arg3| + | b | a | 1 | + Then I should have received a transactionID + Then I wait up to "240" seconds for transaction to be committed to peers: + | vp1 | vp2 | vp3 | vp4 | vp5 | vp6 | vp7 | vp8 | vp9 | vp10 | vp11 | vp12 | vp13 | vp14 | vp15 | + + When I query 
chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp1 | vp2 | vp3 | + Then I should get a JSON response from peers with "result.message" = "210" + | vp0 | vp1 | vp2 | vp3 | + And I wait "15" seconds + + When requesting "/chain" from "vp0" + Then I should "store" the "height" from the JSON response + Given I stop peers: + | vp1 | vp2 | vp3 | vp4 | vp5 | vp6 | + + When I invoke chaincode "example2" function name "invoke" on "vp0" "5" times + |arg1|arg2|arg3| + | a | b | 1 | + And I wait "5" seconds + + When requesting "/chain" from "vp0" + Then I should get a JSON response with "height" = "previous" + + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp7 | vp8 | vp9 | vp10 | vp11 | vp12 | vp13 | vp14 | vp15 | + Then I should get a JSON response from peers with "result.message" = "210" + | vp0 | vp7 | vp8 | vp9 | vp10 | vp11 | vp12 | vp13 | vp14 | vp15 | + + Given I start peers: + | vp3 | + And I wait "15" seconds + + When I invoke chaincode "example2" function name "invoke" on "vp0" + |arg1|arg2|arg3| + | a | b | 10 | + Then I should have received a transactionID + Then I wait up to "120" seconds for transaction to be committed to peers: + | vp0 | vp3 | vp8 | vp9 | vp10 | vp11 | vp12 | vp13 | vp14 | vp15 | + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp3 | vp8 | vp9 | vp10 | vp11 | vp12 | vp13 | vp14 | vp15 | + Then I should get a JSON response from peers with "result.message" = "200" + | vp0 | vp3 | vp8 | vp9 | vp10 | vp11 | vp12 | vp13 | vp14 | vp15 | + + When requesting "/chain" from "vp0" + Then I should get a JSON response with "height" > "previous" + + +#@doNotDecompose + Scenario: Take down the chaincode! + Given we compose "docker-compose-4-consensus-upgrade.yml" + And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers: + | vp0 | + And I use the following credentials for querying peers: + | peer | username | secret | + | vp0 | test_user0 | MS9qrN8hFjlE | + | vp1 | test_user1 | jGlNl6ImkuDo | + | vp2 | test_user2 | zMflqOKezFiA | + | vp3 | test_user3 | vWdLCE00vJy0 | + + When requesting "/chain" from "vp0" + Then I should get a JSON response with "height" = "1" + + # Deploy + When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0" + | arg1 | arg2 | arg3 | arg4 | + | a | 200 | b | 300 | + Then I should have received a chaincode name + Then I wait up to "120" seconds for transaction to be committed to peers: + | vp0 | vp1 | vp2 | vp3 | + + # Build up a sizable blockchain, that vp3 will need to validate at startup + When I invoke chaincode "example2" function name "invoke" on "vp0" "30" times + |arg1|arg2|arg3| + | b | a | 1 | + Then I should have received a transactionID + Then I wait up to "120" seconds for transaction to be committed to peers: + | vp0 | vp1 | vp2 | vp3 | + + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp1 | vp2 | vp3 | + Then I should get a JSON response from peers with "result.message" = "230" + | vp0 | vp1 | vp2 | vp3 | + And I wait "5" seconds + + Given I stop the chaincode + And I remove the chaincode images + + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp1 | vp2 | vp3 | + Then I wait "30" seconds + And I should get a JSON response from peers with "result.message" = "230" + + +#@doNotDecompose + Scenario: Take down the chaincode and stop the peers! 
+ Given we compose "docker-compose-4-consensus-upgrade.yml" + And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers: + | vp0 | + And I use the following credentials for querying peers: + | peer | username | secret | + | vp0 | test_user0 | MS9qrN8hFjlE | + | vp1 | test_user1 | jGlNl6ImkuDo | + | vp2 | test_user2 | zMflqOKezFiA | + | vp3 | test_user3 | vWdLCE00vJy0 | + + When requesting "/chain" from "vp0" + Then I should get a JSON response with "height" = "1" + + # Deploy + When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0" + | arg1 | arg2 | arg3 | arg4 | + | a | 200 | b | 300 | + Then I should have received a chaincode name + Then I wait up to "120" seconds for transaction to be committed to peers: + | vp0 | vp1 | vp2 | vp3 | + + # Build up a sizable blockchain, that vp3 will need to validate at startup + When I invoke chaincode "example2" function name "invoke" on "vp0" "30" times + |arg1|arg2|arg3| + | b | a | 1 | + Then I should have received a transactionID + Then I wait up to "120" seconds for transaction to be committed to peers: + | vp0 | vp1 | vp2 | vp3 | + + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp1 | vp2 | vp3 | + Then I should get a JSON response from peers with "result.message" = "230" + | vp0 | vp1 | vp2 | vp3 | + And I wait "5" seconds + + Given I stop the chaincode + And I remove the chaincode images + And I stop peers: + | vp0 | vp1 | vp2 | vp3 | + And I wait "15" seconds + And I start peers: + | vp0 | vp1 | vp2 | vp3 | + And I wait "30" seconds + + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp1 | vp2 | vp3 | + Then I wait "30" seconds + And I should get a JSON response from peers with "result.message" = "230" + | vp0 | vp1 | vp2 | vp3 | + + +#@doNotDecompose + Scenario: Take down the chaincode and the peers! 
+ Given we compose "docker-compose-4-consensus-upgrade.yml" + And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers: + | vp0 | + And I use the following credentials for querying peers: + | peer | username | secret | + | vp0 | test_user0 | MS9qrN8hFjlE | + | vp1 | test_user1 | jGlNl6ImkuDo | + | vp2 | test_user2 | zMflqOKezFiA | + | vp3 | test_user3 | vWdLCE00vJy0 | + + When requesting "/chain" from "vp0" + Then I should get a JSON response with "height" = "1" + + # Deploy + When I deploy chaincode "github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02" with ctor "init" to "vp0" + | arg1 | arg2 | arg3 | arg4 | + | a | 200 | b | 300 | + Then I should have received a chaincode name + Then I wait up to "120" seconds for transaction to be committed to peers: + | vp0 | vp1 | vp2 | vp3 | + + # Build up a sizable blockchain, that vp3 will need to validate at startup + When I invoke chaincode "example2" function name "invoke" on "vp0" "30" times + |arg1|arg2|arg3| + | b | a | 1 | + Then I should have received a transactionID + Then I wait up to "120" seconds for transaction to be committed to peers: + | vp0 | vp1 | vp2 | vp3 | + + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp1 | vp2 | vp3 | + Then I should get a JSON response from peers with "result.message" = "230" + | vp0 | vp1 | vp2 | vp3 | + And I wait "5" seconds + + When requesting "/chain" from "vp0" + Then I should "store" the "height" from the JSON response + + Given I stop the chaincode + And I remove the chaincode images + And I stop peers: + | vp0 | vp1 | vp2 | vp3 | + And I wait "15" seconds + And we compose "docker-compose-4-consensus-newpeers_w_upgrade.yml" + And I wait "30" seconds + + When requesting "/chain" from "vp0" + Then I should get a JSON response with "height" = "previous" + + Given I use the following credentials for querying peers: + | peer | username | secret | + | vp0 | test_user0 | MS9qrN8hFjlE | + | vp1 | test_user1 | jGlNl6ImkuDo | + | vp2 | test_user2 | zMflqOKezFiA | + | vp3 | test_user3 | vWdLCE00vJy0 | + + When I query chaincode "example2" function name "query" with value "a" on peers: + | vp0 | vp1 | vp2 | vp3 | + Then I wait "30" seconds + And I should get a JSON response from peers with "result.message" = "230" + | vp0 | vp1 | vp2 | vp3 | diff --git a/bddtests/environment.py b/bddtests/environment.py index 0bd7c10d93f..7dc24a7f6fc 100644 --- a/bddtests/environment.py +++ b/bddtests/environment.py @@ -1,7 +1,9 @@ import subprocess import os import glob +import json, time +from steps import bdd_remote_util from steps.bdd_test_util import cli_call, bdd_log from steps.bdd_compose_util import getDockerComposeFileArgsFromYamlFile @@ -10,59 +12,103 @@ def coverageEnabled(context): return context.config.userdata.get("coverage", "false") == "true" + +def tlsEnabled(context): + return context.config.userdata.get("tls", "false") == "true" + + +def retrieve_logs(context, scenario): + bdd_log("Scenario {0} failed. Getting container logs".format(scenario.name)) + file_suffix = "_" + scenario.name.replace(" ", "_") + ".log" + if context.compose_containers == []: + bdd_log("docker-compose command failed on '{0}'. 
There are no docker logs.".format(context.compose_yaml)) + + # get logs from the peer containers + for containerData in context.compose_containers: + with open(containerData.containerName + file_suffix, "w+") as logfile: + sys_rc = subprocess.call(["docker", "logs", containerData.containerName], stdout=logfile, stderr=logfile) + if sys_rc !=0 : + bdd_log("Cannot get logs for {0}. Docker rc = {1}".format(containerData.containerName,sys_rc)) + + # get logs from the chaincode containers + cc_output, cc_error, cc_returncode = \ + cli_call(["docker", "ps", "-f", "name=dev-", "--format", "{{.Names}}"], expect_success=True) + for containerName in cc_output.splitlines(): + namePart,sep,junk = containerName.rpartition("-") + with open(namePart + file_suffix, "w+") as logfile: + sys_rc = subprocess.call(["docker", "logs", containerName], stdout=logfile, stderr=logfile) + if sys_rc !=0 : + bdd_log("Cannot get logs for {0}. Docker rc = {1}".format(namepart,sys_rc)) + + +def decompose_containers(context, scenario): + fileArgsToDockerCompose = getDockerComposeFileArgsFromYamlFile(context.compose_yaml) + + bdd_log("Decomposing with yaml '{0}' after scenario {1}, ".format(context.compose_yaml, scenario.name)) + context.compose_output, context.compose_error, context.compose_returncode = \ + cli_call(["docker-compose"] + fileArgsToDockerCompose + ["unpause"], expect_success=True) + context.compose_output, context.compose_error, context.compose_returncode = \ + cli_call(["docker-compose"] + fileArgsToDockerCompose + ["stop"], expect_success=True) + + if coverageEnabled(context): + #Save the coverage files for this scenario before removing containers + containerNames = [containerData.containerName for containerData in context.compose_containers] + saveCoverageFiles("coverage", scenario.name.replace(" ", "_"), containerNames, "cov") + + context.compose_output, context.compose_error, context.compose_returncode = \ + cli_call(["docker-compose"] + fileArgsToDockerCompose + ["rm","-f"], expect_success=True) + # now remove any other containers (chaincodes) + context.compose_output, context.compose_error, context.compose_returncode = \ + cli_call(["docker", "ps", "-qa"], expect_success=True) + if context.compose_returncode == 0: + # Remove each container + for containerId in context.compose_output.splitlines(): + context.compose_output, context.compose_error, context.compose_returncode = \ + cli_call(["docker", "rm", "-f", containerId], expect_success=True) + + + +def before_scenario(context, scenario): + context.compose_containers = [] + context.compose_yaml = "" + + def after_scenario(context, scenario): + # Handle logs get_logs = context.config.userdata.get("logs", "N") - if get_logs.lower() == "force" or (scenario.status == "failed" and get_logs.lower() == "y" and "compose_containers" in context): - bdd_log("Scenario {0} failed. Getting container logs".format(scenario.name)) - file_suffix = "_" + scenario.name.replace(" ", "_") + ".log" - # get logs from the peer containers - for containerData in context.compose_containers: - with open(containerData.name + file_suffix, "w+") as logfile: - sys_rc = subprocess.call(["docker", "logs", containerData.name], stdout=logfile, stderr=logfile) - if sys_rc !=0 : - bdd_log("Cannot get logs for {0}. 
Docker rc = {1}".format(containerData.name,sys_rc)) - # get logs from the chaincode containers - cc_output, cc_error, cc_returncode = \ - cli_call(["docker", "ps", "-f", "name=dev-", "--format", "{{.Names}}"], expect_success=True) - for containerName in cc_output.splitlines(): - namePart,sep,junk = containerName.rpartition("-") - with open(namePart + file_suffix, "w+") as logfile: - sys_rc = subprocess.call(["docker", "logs", containerName], stdout=logfile, stderr=logfile) - if sys_rc !=0 : - bdd_log("Cannot get logs for {0}. Docker rc = {1}".format(namepart,sys_rc)) - if 'doNotDecompose' in scenario.tags: - if 'compose_yaml' in context: - bdd_log("Not going to decompose after scenario {0}, with yaml '{1}'".format(scenario.name, context.compose_yaml)) - else: - if 'compose_yaml' in context: - fileArgsToDockerCompose = getDockerComposeFileArgsFromYamlFile(context.compose_yaml) + if get_logs.lower() == "force" or (scenario.status == "failed" and get_logs.lower() == "y"): + retrieve_logs(context, scenario) - bdd_log("Decomposing with yaml '{0}' after scenario {1}, ".format(context.compose_yaml, scenario.name)) - context.compose_output, context.compose_error, context.compose_returncode = \ - cli_call(["docker-compose"] + fileArgsToDockerCompose + ["unpause"], expect_success=True) - context.compose_output, context.compose_error, context.compose_returncode = \ - cli_call(["docker-compose"] + fileArgsToDockerCompose + ["stop"], expect_success=True) + # Handle coverage + if coverageEnabled(context): + for containerData in context.compose_containers: + #Save the coverage files for this scenario before removing containers + containerNames = [containerData.name for containerData in context.compose_containers] + saveCoverageFiles("coverage", scenario.name.replace(" ", "_"), containerNames, "cov") - if coverageEnabled(context): - #Save the coverage files for this scenario before removing containers - containerNames = [containerData.name for containerData in context.compose_containers] - saveCoverageFiles("coverage", scenario.name.replace(" ", "_"), containerNames, "cov") + # Handle decomposition + if 'doNotDecompose' in scenario.tags and 'compose_yaml' in context: + bdd_log("Not going to decompose after scenario {0}, with yaml '{1}'".format(scenario.name, context.compose_yaml)) + elif context.byon: + bdd_log("Stopping a BYON (Bring Your Own Network) setup") + decompose_remote(context, scenario) + elif 'compose_yaml' in context: + decompose_containers(context, scenario) + else: + bdd_log("Nothing to stop in this setup") - context.compose_output, context.compose_error, context.compose_returncode = \ - cli_call(["docker-compose"] + fileArgsToDockerCompose + ["rm","-f"], expect_success=True) - # now remove any other containers (chaincodes) - context.compose_output, context.compose_error, context.compose_returncode = \ - cli_call(["docker", "ps", "-qa"], expect_success=True) - if context.compose_returncode == 0: - # Remove each container - for containerId in context.compose_output.splitlines(): - #bdd_log("docker rm {0}".format(containerId)) - context.compose_output, context.compose_error, context.compose_returncode = \ - cli_call(["docker", "rm", "-f", containerId], expect_success=True) # stop any running peer that could get in the way before starting the tests def before_all(context): + context.byon = os.path.exists("networkcredentials") + context.remote_ip = context.config.userdata.get("remote-ip", None) + context.tls = tlsEnabled(context) + if context.byon: + context = get_remote_servers(context) + 
time.sleep(5) + else: cli_call(["../build/bin/peer", "node", "stop"], expect_success=False) + #cli_call(["../../build/bin/peer", "node", "stop"], expect_success=False) # stop any running peer that could get in the way before starting the tests def after_all(context): @@ -70,3 +116,34 @@ def after_all(context): if coverageEnabled(context): createCoverageAggregate() + +########################################## +def get_remote_servers(context): + with open("networkcredentials", "r") as network_file: + network_creds = json.loads(network_file.read()) + context.remote_servers = [{'ip': peer['host'], 'port': peer['port']} for peer in network_creds['PeerData']] + context.remote_map = {} + for peer in network_creds['PeerData']: + context.remote_map[peer['name']] = {'ip': peer['host'], 'port': peer['port']} + context.remote_user = network_creds["CA_username"] + context.remote_secret = network_creds["CA_secret"] + context.user_creds = network_creds['UserData'] + context.remote_ip = context.config.userdata.get("remote-ip", None) + return context + +def decompose_remote(context, scenario): + context = get_remote_servers(context) + headers = {'Content-type': 'application/vnd.ibm.zaci.payload+json;version=1.0', + 'Accept': 'application/vnd.ibm.zaci.payload+json;version=1.0', + 'zACI-API': 'com.ibm.zaci.system/1.0'} + if context.remote_ip: + for target in ["vp0", "vp1", "vp2", "vp3"]: + status = bdd_remote_util.getNodeStatus(context, target).json() + if status.get(target, "Unknown") != "STARTED": + status = bdd_remote_util.restartNode(context, target) + time.sleep(60) + else: + command = " export SUDO_ASKPASS=~/.remote_pass.sh;sudo iptables -A INPUT -p tcp --destination-port 30303 -j DROP" + ssh_call(context, command) + + diff --git a/bddtests/peer_basic.feature b/bddtests/peer_basic.feature index c69f7e2f234..1d80fafe1f3 100644 --- a/bddtests/peer_basic.feature +++ b/bddtests/peer_basic.feature @@ -11,6 +11,7 @@ Feature: Network of Peers I want to run a network of peers # @wip +@smoke Scenario: Peers list test, single peer issue #827 Given we compose "docker-compose-1.yml" When requesting "/network/peers" from "vp0" @@ -390,6 +391,7 @@ Feature: Network of Peers # @doNotDecompose # @wip +@smoke Scenario: chaincode example 02 single peer Given we compose "docker-compose-1.yml" When requesting "/chain" from "vp0" @@ -460,6 +462,8 @@ Feature: Network of Peers # @doNotDecompose # @wip +@scat +@smoke @issue_567 Scenario Outline: chaincode example02 with 4 peers and 1 membersrvc, issue #567 @@ -480,7 +484,7 @@ Feature: Network of Peers | a | 100 | b | 200 | Then I should have received a chaincode name Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | vp3 | + | vp1 | vp2 | vp3 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp2 | vp3 | @@ -492,7 +496,7 @@ Feature: Network of Peers | a | b | 20 | Then I should have received a transactionID Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | vp3 | + | vp1 | vp2 | vp3 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp2 | vp3 | @@ -505,11 +509,11 @@ Feature: Network of Peers | docker-compose-4-consensus-batch.yml | 60 | - #@doNotDecompose - #@wip +@scat @issue_680 @fab380 Scenario Outline: chaincode example02 with 4 peers and 1 membersrvc, issue #680 (State transfer) + Given we compose "" And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers: | vp0 | @@ 
-530,7 +534,7 @@ Feature: Network of Peers | a | 100 | b | 200 | Then I should have received a chaincode name Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | vp3 | + | vp1 | vp2 | vp3 | # Build up a sizable blockchain, that vp3 will need to validate at startup When I invoke chaincode "example2" function name "invoke" on "vp0" "30" times @@ -538,14 +542,13 @@ Feature: Network of Peers | b | a | 1 | Then I should have received a transactionID Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | vp3 | + | vp1 | vp2 | vp3 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp2 | vp3 | Then I should get a JSON response from peers with "result.message" = "130" | vp0 | vp1 | vp2 | vp3 | - # STOPPING vp3!!!!!!!!!!!!!!!!!!!!!!!!!! Given I stop peers: | vp3 | @@ -555,7 +558,7 @@ Feature: Network of Peers | a | b | 10 | Then I should have received a transactionID Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | + | vp1 | vp2 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp2 | @@ -572,7 +575,7 @@ Feature: Network of Peers | a | b | 10 | Then I should have received a transactionID Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | vp3 | + | vp1 | vp2 | vp3 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp2 | vp3 | Then I should get a JSON response from peers with "result.message" = "20" @@ -585,6 +588,8 @@ Feature: Network of Peers | docker-compose-4-consensus-batch.yml docker-compose-4-consensus-batch-nosnapshotbuffer.yml | 60 | +# @doNotDecompose +@scat @issue_724 Scenario Outline: chaincode example02 with 4 peers and 1 membersrvc, issue #724 @@ -605,7 +610,7 @@ Feature: Network of Peers | a | 100 | b | 200 | Then I should have received a chaincode name Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | vp3 | + | vp1 | vp2 | vp3 | When I query chaincode "example2" function name "query" with value "a" on peers: @@ -641,6 +646,8 @@ Feature: Network of Peers Then I should get a JSON response with "height" = "1" +@scat +@smoke Scenario Outline: 4 peers and 1 membersrvc, consensus still works if one backup replica fails Given we compose "" @@ -662,7 +669,7 @@ Feature: Network of Peers | a | 100 | b | 200 | Then I should have received a chaincode name Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | vp3 | + | vp1 | vp2 | vp3 | # get things started. 
All peers up and executing Txs When I invoke chaincode "example2" function name "invoke" on "vp0" "5" times @@ -670,7 +677,7 @@ Feature: Network of Peers | a | b | 1 | Then I should have received a transactionID Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | vp3 | + | vp1 | vp2 | vp3 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp2 | vp3 | @@ -687,7 +694,7 @@ Feature: Network of Peers | a | b | 1 | Then I should have received a transactionID Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp3 | + | vp1 | vp3 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp3 | @@ -698,6 +705,7 @@ Feature: Network of Peers | ComposeFile | WaitTime | | docker-compose-4-consensus-batch.yml | 60 | +@scat Scenario Outline: 4 peers and 1 membersrvc, consensus fails if 2 backup replicas fail Given we compose "" @@ -719,7 +727,7 @@ Feature: Network of Peers | a | 100 | b | 200 | Then I should have received a chaincode name Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | vp3 | + | vp1 | vp2 | vp3 | # get things started. All peers up and executing Txs When I invoke chaincode "example2" function name "invoke" on "vp0" "5" times @@ -727,14 +735,13 @@ Feature: Network of Peers | a | b | 1 | Then I should have received a transactionID Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | vp3 | + | vp1 | vp2 | vp3 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp2 | vp3 | Then I should get a JSON response from peers with "result.message" = "95" | vp0 | vp1 | vp2 | vp3 | - # STOP vp2 Given I stop peers: | vp1 | vp2 | @@ -777,14 +784,14 @@ Feature: Network of Peers | a | 100 | b | 200 | Then I should have received a chaincode name Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | + | vp1 | vp2 | When I invoke chaincode "example2" function name "invoke" on "vp0" "50" times |arg1|arg2|arg3| | a | b | 1 | Then I should have received a transactionID Then I wait up to "60" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | + | vp1 | vp2 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp2 | @@ -821,12 +828,13 @@ Feature: Network of Peers | a | 100 | b | 200 | Then I should have received a chaincode name Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | + | vp1 | vp2 | Examples: Consensus Options | ComposeFile | WaitTime | | docker-compose-4-consensus-batch.yml docker-compose-4-consensus-nvp0.yml | 60 | +@scat @issue_1000 Scenario Outline: chaincode example02 with 4 peers and 1 membersrvc, test crash fault @@ -849,7 +857,7 @@ Feature: Network of Peers | a | 100 | b | 200 | Then I should have received a chaincode name Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | vp3 | + | vp1 | vp2 | vp3 | # Build up a sizable blockchain, to advance the sequence number When I invoke chaincode "example2" function name "invoke" on "vp0" "30" times @@ -857,14 +865,13 @@ Feature: Network of Peers | b | a | 1 | Then I should have received a transactionID Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | vp3 | + | vp1 | vp2 | vp3 | When I query chaincode "example2" function name "query" with 
value "a" on peers: - | vp0 | vp1 | vp2 | vp3 | +| Then I should get a JSON response from peers with "result.message" = "130" | vp0 | vp1 | vp2 | vp3 | - # Stop vp1, vp2, vp3 Given I stop peers: | vp1 | vp2 | vp3 | @@ -878,7 +885,7 @@ Feature: Network of Peers | a | b | 10 | Then I should have received a transactionID Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | + | vp1 | vp2 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp2 | Then I should get a JSON response from peers with "result.message" = "120" @@ -891,6 +898,7 @@ Feature: Network of Peers +@scat @issue_1091 @doNotDecompose Scenario Outline: chaincode example02 with 4 peers and 1 membersrvc, issue #1091 (out of date peer) @@ -914,9 +922,8 @@ Feature: Network of Peers | a | 100 | b | 200 | Then I should have received a chaincode name Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | + | vp1 | vp2 | - # STOPPING vp3!!!!!!!!!!!!!!!!!!!!!!!!!! Given I stop peers: | vp3 | @@ -926,14 +933,13 @@ Feature: Network of Peers | b | a | 1 | Then I should have received a transactionID Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | + | vp1 | vp2 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp2 | Then I should get a JSON response from peers with "result.message" = "101" | vp0 | vp1 | vp2 | - # Now start vp3 again Given I start peers, waiting up to "15" seconds for them to be ready: | vp3 | @@ -943,10 +949,9 @@ Feature: Network of Peers | a | b | 10 | Then I should have received a transactionID Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | + | vp1 | vp2 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp2 | - | vp0 | vp1 | vp2 | Then I should get a JSON response from peers with "result.message" = "21" # Force VP3 to attempt to sync with the rest of the peers @@ -985,7 +990,7 @@ Feature: Network of Peers | a | 100 | b | 200 | Then I should have received a chaincode name Then I wait up to "60" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | + | vp1 | vp2 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp2 | @@ -997,7 +1002,7 @@ Feature: Network of Peers | a | b | 1 | Then I should have received a transactionID Then I wait up to "20" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | + | vp1 | vp2 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp2 | @@ -1025,12 +1030,11 @@ Feature: Network of Peers | a | 100 | b | 200 | Then I should have received a chaincode name Then I wait up to "" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | vp3 | + | vp1 | vp2 | vp3 | When requesting "/chain" from "vp0" Then I should get a JSON response with "height" = "2" - # STOP vp0 Given I stop peers: | vp0 | @@ -1042,7 +1046,7 @@ Feature: Network of Peers | a | b | 1 | Then I should have received a transactionID Then I wait up to "" seconds for transaction to be committed to peers: - | vp1 | vp2 | vp3 | + | vp2 | vp3 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp1 | vp2 | vp3 | Then I should get a JSON response from peers with "result.message" = "95" @@ -1075,7 +1079,9 @@ Feature: Network of Peers | docker-compose-2.yml | +@scat @issue_1942 
+# @doNotDecompose Scenario: chaincode example02 with 4 peers, stop and start alternates, reverse Given we compose "docker-compose-4-consensus-batch.yml" And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers: @@ -1095,7 +1101,7 @@ Scenario: chaincode example02 with 4 peers, stop and start alternates, reverse | a | 1000 | b | 0 | Then I should have received a chaincode name Then I wait up to "60" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | vp3 | + | vp1 | vp2 | vp3 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp2 | vp3 | @@ -1111,8 +1117,8 @@ Scenario: chaincode example02 with 4 peers, stop and start alternates, reverse |arg1|arg2|arg3| | a | b | 1 | Then I should have received a transactionID - Then I wait up to "60" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp3 | + Then I wait up to "180" seconds for transaction to be committed to peers: + | vp1 | vp3 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp3 | @@ -1127,14 +1133,15 @@ Scenario: chaincode example02 with 4 peers, stop and start alternates, reverse When I invoke chaincode "example2" function name "invoke" on "vp3" "20" times |arg1|arg2|arg3| | a | b | 1 | - Then I wait up to "60" seconds for transactions to be committed to peers: - | vp0 | vp2 | vp3 | + Then I wait up to "180" seconds for transactions to be committed to peers: + | vp2 | vp3 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp2 | vp3 | Then I should get a JSON response from peers with "result.message" = "977" | vp0 | vp2 | vp3 | +@scat @issue_1874a #@doNotDecompose Scenario: chaincode example02 with 4 peers, two stopped @@ -1156,7 +1163,7 @@ Scenario: chaincode example02 with 4 peers, two stopped | a | 100 | b | 200 | Then I should have received a chaincode name Then I wait up to "60" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | vp3 | + | vp1 | vp2 | vp3 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp2 | vp3 | @@ -1176,7 +1183,8 @@ Scenario: chaincode example02 with 4 peers, two stopped # Make sure vp3 catches up first Then I wait up to "60" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp3 | + #| vp0 | vp1 | vp3 | + | vp1 | vp3 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp3 | Then I should get a JSON response from peers with "result.message" = "90" @@ -1187,14 +1195,17 @@ Scenario: chaincode example02 with 4 peers, two stopped | a | b | 10 | Then I should have received a transactionID Then I wait up to "60" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp3 | + #| vp0 | vp1 | vp3 | + | vp1 | vp3 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp3 | Then I should get a JSON response from peers with "result.message" = "0" | vp0 | vp1 | vp3 | +@scat @issue_1874b +#@doNotDecompose Scenario: chaincode example02 with 4 peers, two stopped, bring back vp0 Given we compose "docker-compose-4-consensus-batch.yml" And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers: @@ -1214,7 +1225,7 @@ Scenario: chaincode example02 with 4 peers, two stopped, bring back vp0 | a | 100 | b | 200 | Then I should have received a chaincode name Then I wait up to "60" seconds for transaction to be committed to peers: - | vp0 | 
vp1 | vp2 | + | vp1 | vp2 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp2 | vp3 | @@ -1243,22 +1254,19 @@ Scenario: chaincode example02 with 4 peers, two stopped, bring back vp0 Given I start peers, waiting up to "15" seconds for them to be ready: | vp0 | - # Ensure transaction committed while vp0 was down is part of the ledger - Then I wait up to "60" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | - When I invoke chaincode "example2" function name "invoke" on "vp1" "8" times |arg1|arg2|arg3| | a | b | 10 | Then I should have received a transactionID - Then I wait up to "60" seconds for transactions to be committed to peers: - | vp0 | vp1 | vp2 | + Then I wait up to "60" seconds for transaction to be committed to peers: + | vp1 | vp2 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp2 | Then I should get a JSON response from peers with "result.message" = "0" | vp0 | vp1 | vp2 | +@scat @issue_1874c Scenario: chaincode example02 with 4 peers, two stopped, bring back both Given we compose "docker-compose-4-consensus-batch.yml" @@ -1279,7 +1287,7 @@ Scenario: chaincode example02 with 4 peers, two stopped, bring back both | a | 100 | b | 200 | Then I should have received a chaincode name Then I wait up to "60" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | + | vp1 | vp2 | When I query chaincode "example2" function name "query" with value "a" on peers: | vp0 | vp1 | vp2 | vp3 | @@ -1302,7 +1310,7 @@ Scenario: chaincode example02 with 4 peers, two stopped, bring back both | a | b | 10 | Then I should have received a transactionID Then I wait up to "60" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | vp3 | + | vp1 | vp2 | vp3 | Then I wait "30" seconds When I query chaincode "example2" function name "query" with value "a" on peers: @@ -1310,6 +1318,7 @@ Scenario: chaincode example02 with 4 peers, two stopped, bring back both Then I should get a JSON response from peers with "result.message" = "10" | vp0 | vp1 | vp2 | vp3 | +@scat @issue_2116 #@doNotDecompose Scenario Outline: chaincode authorizable_counter with 4 peers, two stopped, bring back both @@ -1331,7 +1340,7 @@ Scenario: chaincode example02 with 4 peers, two stopped, bring back both | 0 | Then I should have received a chaincode name Then I wait up to "60" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | + | vp1 | vp2 | When I query chaincode "authorizable_counter" function name "read" on "vp0": |arg1| @@ -1348,7 +1357,7 @@ Scenario: chaincode example02 with 4 peers, two stopped, bring back both | a | b | 10 | Then I should have received a transactionID Then I wait up to "30" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | vp3 | + | vp1 | vp2 | vp3 | When I query chaincode "authorizable_counter" function name "read" with value "a" on peers: | vp0 | vp1 | vp2 | vp3 | @@ -1365,7 +1374,7 @@ Scenario: chaincode example02 with 4 peers, two stopped, bring back both | a | Then I wait up to "15" seconds for transaction to be committed to peers: - | vp0 | vp1 | vp2 | vp3 | + | vp1 | vp2 | vp3 | When I query chaincode "authorizable_counter" function name "read" with value "a" on peers: | vp0 | vp1 | vp2 | vp3 | diff --git a/bddtests/steps/bdd_compose_util.py b/bddtests/steps/bdd_compose_util.py index bc48126123a..4188451a073 100644 --- a/bddtests/steps/bdd_compose_util.py +++ b/bddtests/steps/bdd_compose_util.py @@ -127,7 +127,9 
@@ def containersAreReadyWithinTimeout(context, containers, timeout):
     bdd_log("All containers should be up by {}".format(formattedTime))
 
     for container in containers:
-        if not containerIsInitializedByTimestamp(container, timeoutTimestamp):
+        if 'dbstore' in container.name:
+            containers.remove(container)
+        elif not containerIsInitializedByTimestamp(container, timeoutTimestamp):
             return False
 
     peersAreReady = peersAreReadyByTimestamp(context, containers, timeoutTimestamp)
@@ -216,7 +218,9 @@ def containerIsPeer(container):
     # is to determine if the container is listening on the REST port. However, this method
     # may run before the listening port is ready. Hence, as along as the current
     # convention of vp[0-9] is adhered to this function will be good enough.
-    return re.search("vp[0-9]+", container.name, re.IGNORECASE)
+    if 'dbstore' not in container.name:
+        return re.search("vp[0-9]+", container.name, re.IGNORECASE)
+    return False
 
 def peerIsReadyByTimestamp(context, peerContainer, allPeerContainers, timeoutTimestamp):
     while peerIsNotReady(context, peerContainer, allPeerContainers):
diff --git a/bddtests/steps/bdd_remote_util.py b/bddtests/steps/bdd_remote_util.py
new file mode 100644
index 00000000000..95ab318d62b
--- /dev/null
+++ b/bddtests/steps/bdd_remote_util.py
@@ -0,0 +1,83 @@
+#
+# Copyright IBM Corp. 2016 All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + + +from bdd_request_util import httpGet, httpPost, getTokenedHeaders, getToken, getSchema +from bdd_test_util import bdd_log + + +def getNetwork(context): + """ Get the Network ID.""" + if hasattr(context, 'network_id'): + return context.network_id + + headers = getTokenedHeaders(context) + url = "{0}://{1}/api/com.ibm.zBlockchain/networks".format(getSchema(context.tls), context.remote_ip) + response = httpGet(url, headers=headers) + context.network_id = response.json()[0] + return response + + +def stopNode(context, peer): + """Stops the peer node on a specific network.""" + headers = getTokenedHeaders(context) + getNetwork(context) + + url = "{0}://{1}/api/com.ibm.zBlockchain/networks/{2}/nodes/{3}/stop".format(getSchema(context.tls), context.remote_ip, context.network_id, peer) + body = {} + response = httpPost(url, body, headers=headers) + + +def restartNode(context, peer): + """Restart the peer node on a specific network.""" + headers = getTokenedHeaders(context) + getNetwork(context) + url = "{0}://{1}/api/com.ibm.zBlockchain/networks/{2}/nodes/{3}/restart".format(getSchema(context.tls), context.remote_ip, context.network_id, peer) + body = {} + response = httpPost(url, body, headers=headers) + + +def getNodeStatus(context, peer): + """ Get the Node status.""" + headers = getTokenedHeaders(context) + getNetwork(context) + url = "{0}://{1}/api/com.ibm.zBlockchain/networks/{2}/nodes/{3}/status".format(getSchema(context.tls), context.remote_ip, context.network_id, peer) + response = httpGet(url, headers=headers) + return response + + +def getNodeLogs(context): + """ Get the Node logs.""" + headers = getTokenedHeaders(context) + getNetwork(context) + url = "{0}://{1}/api/com.ibm.zBlockchain/networks/{2}/nodes/{3}/logs".format(getSchema(context.tls), context.remote_ip, context.network_id, peer) + response = httpGet(url, headers=headers) + return response + + +def getChaincodeLogs(context, peer): + """ Get the Chaincode logs.""" + headers = getTokenedHeaders(context) + getNetwork(context) + # /api/com.ibm.zBlockchain/networks/{network_id}/nodes/{node_id}/chaincodes/{chaincode_id}/logs + #url = "{0}://{1}/api/com.ibm.zBlockchain/networks/{2}/nodes/{3}/chaincodes/{4}/logs".format(getSchema(context.tls), context.remote_ip, context.network_id, peer, context.chaincodeSpec['chaincodeID']['name']) + if hasattr(context, 'chaincodeSpec'): + url = "{0}://{1}/api/com.ibm.zBlockchain/networks/{2}/nodes/{3}/chaincodes/{4}/logs".format(getSchema(context.tls), context.remote_ip, context.network_id, peer, context.chaincodeSpec.get('chaincodeID', {}).get('name', '')) + response = httpGet(url, headers=headers) + else: + response = "No chaincode has been deployed" + return response + diff --git a/bddtests/steps/bdd_request_util.py b/bddtests/steps/bdd_request_util.py index cf39f4c5313..4314df9c4a7 100644 --- a/bddtests/steps/bdd_request_util.py +++ b/bddtests/steps/bdd_request_util.py @@ -16,6 +16,7 @@ import requests, json from bdd_test_util import bdd_log +from bdd_json_util import getAttributeFromJSON CORE_REST_PORT = "7050" ACCEPT_JSON_HEADER = { @@ -36,7 +37,12 @@ def httpGetToContainer(context, container, endpoint, \ to the default peer rest port) and the expectSuccess which validates the response returned a 200 """ request_url = buildContainerUrl(context, container, endpoint, port) - return httpGet(request_url, expectSuccess) + + headers = {'Accept': 'application/json', 'Content-type': 'application/json'} + #if context.byon: + # headers = getTokenedHeaders(context) + + return httpGet(request_url, 
headers=headers, expectSuccess=expectSuccess) def httpPostToContainerAlias(context, containerAlias, endpoint, body, \ port=CORE_REST_PORT, expectSuccess=True): @@ -52,7 +58,14 @@ def httpPostToContainer(context, container, endpoint, body, \ to the default peer rest port) and the expectSuccess which validates the response returned a 200 """ request_url = buildContainerUrl(context, container, endpoint, port) - return httpPost(request_url, body, expectSuccess) + bdd_log("Request URL: {}".format(request_url)) + + headers = {'Accept': 'application/json', 'Content-type': 'application/json'} + #if context.byon: + # headers = getTokenedHeaders(context) + bdd_log("Headers: {}".format(headers)) + + return httpPost(request_url, body, headers=headers, expectSuccess=expectSuccess) def buildContainerAliasUrl(context, containerAlias, endpoint, port=CORE_REST_PORT): """ Build a URL to do a HTTP request to the given container looking up the @@ -64,25 +77,34 @@ def buildContainerAliasUrl(context, containerAlias, endpoint, port=CORE_REST_POR def buildContainerUrl(context, container, endpoint, port=CORE_REST_PORT): """ Build a URL to do a HTTP request to the given container. Optionally provide a port too which defaults to the peer rest port """ + bdd_log("container: {}".format(container)) + bdd_log("name: {}".format(container.name)) + bdd_log("ipAddress: {}".format(container.ipAddress)) + + peer = container.name.split("_")[1] + #if context.byon: + # bdd_log("remote map: {}".format(context.remote_map)) + # port = context.remote_map[peer]['port'] return buildUrl(context, container.ipAddress, port, endpoint) def buildUrl(context, ipAddress, port, endpoint): schema = "http" - if 'TLS' in context.tags: + #if 'TLS' in context.tags or context.tls: + if hasattr(context.tags, 'TLS') or (hasattr(context, 'tls') and context.tls): schema = "https" return "{0}://{1}:{2}{3}".format(schema, ipAddress, port, endpoint) -def httpGet(url, expectSuccess=True): - return _request("GET", url, expectSuccess=expectSuccess) +def httpGet(url, headers=ACCEPT_JSON_HEADER, expectSuccess=True): + return _request("GET", url, headers, expectSuccess=expectSuccess) -def httpPost(url, body, expectSuccess=True): - return _request("POST", url, json=body, expectSuccess=expectSuccess) +def httpPost(url, body, headers=ACCEPT_JSON_HEADER, expectSuccess=True): + return _request("POST", url, headers=headers, expectSuccess=expectSuccess, json=body) -def _request(method, url, expectSuccess=True, **kwargs): +def _request(method, url, headers, expectSuccess=True, **kwargs): bdd_log("HTTP {} to url = {}".format(method, url)) response = requests.request(method, url, \ - headers=ACCEPT_JSON_HEADER, verify=False, **kwargs) + headers=headers, verify=False, **kwargs) if expectSuccess: assert response.status_code == 200, \ @@ -91,8 +113,56 @@ def _request(method, url, expectSuccess=True, **kwargs): bdd_log("Response from {}:".format(url)) bdd_log(formatResponseText(response)) + response.connection.close() return response def formatResponseText(response): # Limit to 300 chars because of the size of the Base64 encoded Certs - return json.dumps(response.json(), indent = 4)[:300] + bdd_log("Response: {}".format(response)) + try: + return json.dumps(response.json(), indent = 4)[:300] + except: + return "" + + +def getSchema(tls): + schema = "http" + if tls: + schema = "https" + return schema + + +def getTokenedHeaders(context): + getToken(context) + headers = {'Accept': 'application/json', 'Content-type': 'application/json'} + #if context.byon: + # headers = 
{'Accept': 'application/vnd.ibm.zaci.payload+json;version=1.0', + # 'Content-type': 'application/vnd.ibm.zaci.payload+json;version=1.0', + # 'Authorization': 'Bearer %s' % context.token, + # 'zACI-API': 'com.ibm.zaci.system/1.0'} + return headers + + +def getToken(context): + #if 'token' in context: + if hasattr(context, 'token'): + return context.token + + headers = {'Accept': 'application/json', 'Content-type': 'application/json'} + #if context.byon: + # headers = {'Accept': 'application/vnd.ibm.zaci.payload+json;version=1.0', + # 'Content-type': 'application/vnd.ibm.zaci.payload+json;version=1.0', + # 'zACI-API': 'com.ibm.zaci.system/1.0'} + + #url = "{0}://{1}/api/com.ibm.zaci.system/api-tokens".format(getSchema(context.tls), context.remote_ip) + url = "https://{1}/api/com.ibm.zaci.system/api-tokens".format(getSchema(context.tls), context.remote_ip) + body = {"kind": "request", + "parameters": {"user": "anunez", "password": "password"}} + response = httpPost(url, body, headers=headers) + try: + context.token = getAttributeFromJSON("parameters.token", response.json()) + except: + bdd_log("Unable to get the token for the network at {}".format(context.remote_ip)) + raise Exception, "Unable to get the network token" + return response + diff --git a/bddtests/steps/consensus.py b/bddtests/steps/consensus.py new file mode 100644 index 00000000000..7e2d7127ece --- /dev/null +++ b/bddtests/steps/consensus.py @@ -0,0 +1,69 @@ +# +# Copyright IBM Corp. 2016 All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+import os, os.path
+import re
+import time
+import copy
+from behave import *
+from datetime import datetime, timedelta
+import base64
+
+import json
+
+import bdd_compose_util, bdd_test_util, bdd_request_util
+#from bdd_json_util import getAttributeFromJSON
+from bdd_test_util import bdd_log
+
+import sys, yaml
+import subprocess
+
+
+@given(u'I stop the chaincode')
+def step_impl(context):
+    try:
+        # Kill chaincode containers
+        res, error, returncode = bdd_test_util.cli_call(
+            ["docker", "ps", "-n=4", "-q"],
+            expect_success=True)
+        bdd_log("Killing chaincode containers: {0}".format(res))
+        result, error, returncode = bdd_test_util.cli_call(
+            ["docker", "rm", "-f"] + res.split('\n'),
+            expect_success=False)
+        bdd_log("Stopped chaincode containers")
+
+    except:
+        raise Exception("Unable to kill chaincode containers")
+
+@given(u'I remove the chaincode images')
+def step_impl(context):
+    try:
+        # Remove chaincode images
+        res, error, returncode = bdd_test_util.cli_call(
+            ["docker", "images"],
+            expect_success=True)
+        images = res.split('\n')
+        for image in images:
+            if image.startswith('dev-vp'):
+                fields = image.split()
+                r, e, ret = bdd_test_util.cli_call(
+                    ["docker", "rmi", "-f", fields[2]],
+                    expect_success=False)
+        bdd_log("Removed chaincode images")
+
+    except:
+        raise Exception("Unable to remove chaincode images")
+
diff --git a/bddtests/steps/peer_basic_impl.py b/bddtests/steps/peer_basic_impl.py
index 0f660c4b595..323ba4a4b6d 100644
--- a/bddtests/steps/peer_basic_impl.py
+++ b/bddtests/steps/peer_basic_impl.py
@@ -40,7 +40,7 @@ def step_impl(context, composeYamlFile):
 
     bdd_compose_util.parseComposeOutput(context)
 
-    timeoutSeconds = 30
+    timeoutSeconds = 60
     assert bdd_compose_util.allContainersAreReadyWithinTimeout(context, timeoutSeconds), \
         "Containers did not come up within {} seconds, aborting".format(timeoutSeconds)
 
@@ -68,10 +68,10 @@ def formatStringToCompare(value):
     # double quotes are replaced by simple quotes because is not possible escape double quotes in the attribute parameters.
return str(value).replace("\"", "'") -@then(u'I should get a JSON response with "{attribute}" = "{expectedValue}"') -def step_impl(context, attribute, expectedValue): - foundValue = getAttributeFromJSON(attribute, context.response.json()) - assert (formatStringToCompare(foundValue) == expectedValue), "For attribute %s, expected (%s), instead found (%s)" % (attribute, expectedValue, foundValue) +#@then(u'I should get a JSON response with "{attribute}" = "{expectedValue}"') +#def step_impl(context, attribute, expectedValue): +# foundValue = getAttributeFromJSON(attribute, context.response.json()) +# assert (formatStringToCompare(foundValue) == expectedValue), "For attribute %s, expected (%s), instead found (%s)" % (attribute, expectedValue, foundValue) @then(u'I should get a JSON response with array "{attribute}" contains "{expectedValue}" elements') def step_impl(context, attribute, expectedValue): @@ -518,7 +518,8 @@ def step_impl(context, attribute, expectedValue): assert 'responses' in context, "responses not found in context" for resp in context.responses: foundValue = getAttributeFromJSON(attribute, resp.json()) - assert (formatStringToCompare(foundValue) == expectedValue), "For attribute %s, expected (%s), instead found (%s)" % (attribute, expectedValue, foundValue) + errStr = "For attribute %s, expected (%s), instead found (%s)" % (attribute, expectedValue, foundValue) + assert (formatStringToCompare(foundValue) == expectedValue), errStr @then(u'I should get a JSON response from peers with "{attribute}" = "{expectedValue}"') def step_impl(context, attribute, expectedValue): @@ -528,7 +529,56 @@ def step_impl(context, attribute, expectedValue): for resp in context.responses: foundValue = getAttributeFromJSON(attribute, resp.json()) - assert (formatStringToCompare(foundValue) == expectedValue), "For attribute %s, expected (%s), instead found (%s)" % (attribute, expectedValue, foundValue) + errStr = "For attribute %s, expected (%s), instead found (%s)" % (attribute, expectedValue, foundValue) + assert (formatStringToCompare(foundValue) == expectedValue), errStr + +@then(u'I should "{action}" the "{attribute}" from the JSON response') +def step_impl(context, action, attribute): + assert attribute in context.response.json(), "Attribute not found in response ({})".format(attribute) + foundValue = context.response.json()[attribute] + if action == 'store': + foundValue = getAttributeFromJSON(attribute, context.response.json()) + setattr(context, attribute, foundValue) + bdd_log("Stored %s: %s" % (attribute, getattr(context, attribute)) ) + +def checkHeight(context, foundValue, expectedValue): +# if context.byon: +# errStr = "For attribute height, expected equal or greater than (%s), instead found (%s)" % (expectedValue, foundValue) +# assert (foundValue >= int(expectedValue)), errStr +# elif expectedValue == 'previous': + if expectedValue == 'previous': + bdd_log("Stored height: {}".format(context.height)) + errStr = "For attribute height, expected (%s), instead found (%s)" % (context.height, foundValue) + assert (foundValue == context.height), errStr + else: + errStr = "For attribute height, expected (%s), instead found (%s)" % (expectedValue, foundValue) + assert (formatStringToCompare(foundValue) == expectedValue), errStr + +@then(u'I should get a JSON response with "{attribute}" = "{expectedValue}"') +def step_impl(context, attribute, expectedValue): + foundValue = getAttributeFromJSON(attribute, context.response.json()) + if attribute == 'height': + checkHeight(context, foundValue, 
expectedValue) + else: + errStr = "For attribute %s, expected (%s), instead found (%s)" % (attribute, expectedValue, foundValue) + assert (formatStringToCompare(foundValue) == expectedValue), errStr + # Set the new value of the attribute + setattr(context, attribute, foundValue) + +@then(u'I should get a JSON response with "{attribute}" > "{expectedValue}"') +def step_impl(context, attribute, expectedValue): + foundValue = getAttributeFromJSON(attribute, context.response.json()) + if expectedValue == 'previous': + prev_value = getattr(context, attribute) + bdd_log("Stored value: {}".format(prev_value)) + errStr = "For attribute %s, expected greater than (%s), instead found (%s)" % (attribute, prev_value, foundValue) + assert (foundValue > prev_value), errStr + else: + errStr = "For attribute %s, expected greater than (%s), instead found (%s)" % (attribute, expectedValue, foundValue) + assert (formatStringToCompare(foundValue) > expectedValue), errStr + # Set the new value of the attribute + setattr(context, attribute, foundValue) + @given(u'I register with CA supplying username "{userName}" and secret "{secret}" on peers') def step_impl(context, userName, secret): @@ -594,6 +644,10 @@ def step_impl(context, seconds): assert bdd_compose_util.allContainersAreReadyWithinTimeout(context, timeout), \ "Peers did not come up within {} seconds, aborting.".format(timeout) +@given(u'I start peers') +def step_impl(context): + compose_op(context, "start") + @given(u'I stop peers') def step_impl(context): compose_op(context, "stop") diff --git a/membersrvc/membersrvc.yaml b/membersrvc/membersrvc.yaml index bfdd3b0353e..39f200345af 100644 --- a/membersrvc/membersrvc.yaml +++ b/membersrvc/membersrvc.yaml @@ -134,6 +134,61 @@ eca: test_vp7: 4 twoKZouEyLyB test_vp8: 4 BxP7QNh778gI test_vp9: 4 wu3F1EwJWHvQ + test_vp10: 4 hNeS24SKJtMD + test_vp11: 4 ezTbMAUccdLy + test_vp12: 4 MSDr2juOIooZ + test_vp13: 4 DfPHFoFKj2jl + test_vp14: 4 NyxEfwjy7vPL + test_vp15: 4 sTHJYI3ndQH+ + test_vp16: 4 EePE5sgyIhos + test_vp17: 4 2uram7e1EgF+ + test_vp18: 4 75457cHKhNM7 + test_vp19: 4 eO6qRKBUMgSo + test_vp20: 4 ZZ4W81TbZo25 + test_vp21: 4 XHnsQcDUPHF9 + test_vp22: 4 8wCLDcVaK4ex + test_vp23: 4 qfXNCFie3kIY + test_vp24: 4 72eCtIbLP7c5 + test_vp25: 4 YR+3M+QhFfpd + test_vp26: 4 kTkEcWd+gBnb + test_vp27: 4 cPnZ0SeS2BiU + test_vp28: 4 kgP6gkToiaGt + test_vp29: 4 YASbynfsO/d3 + test_vp30: 4 Ph7O/rtDBKgn + test_vp31: 4 g+i7k8Ao1fQ6 + test_vp32: 4 WABL1OUtNAqG + test_vp33: 4 3vi4Op98jVYu + test_vp34: 4 Ydg0ubVwgovo + test_vp35: 4 yr6HKOqpgqrt + test_vp36: 4 +qdOftmDA2w9 + test_vp37: 4 Bti7oSazbQ8s + test_vp38: 4 Iyh5lx187+2D + test_vp39: 4 g8IdJk/AQztF + test_vp40: 4 +cqPDR3V5AQP + test_vp41: 4 w1Z0ZlkPn3fl + test_vp42: 4 mUx6HpXrmE6C + test_vp43: 4 nvk3eK/1A9+y + test_vp44: 4 68gIcAPFDlLt + test_vp45: 4 TgD7Sh7F5WGV + test_vp46: 4 cwjhpt50nxMT + test_vp47: 4 QIjtCM3k9Ump + test_vp48: 4 kMHYx4KOFus2 + test_vp49: 4 lKP5s+P+lbv+ + test_vp50: 4 +aaOfbbDA2w9 + test_vp51: 4 bTi8oSazbQ8s + test_vp52: 4 iyh6lx177+2D + test_vp53: 4 g9Idjk/aQztF + test_vp54: 4 +CqPdk3V5AQP + test_vp55: 4 w1Z3Zllon3fl + test_vp56: 4 mUx5HpNrmEHC + test_vp57: 4 nNk3e2/119+y + test_vp58: 4 BN8gIcAgFDlL + test_vp59: 4 JgD7Sh5F5WhN + test_vp60: 4 Fwjhpt60nxMT + test_vp61: 4 QJjtCM4k9Ump + test_vp62: 4 kKHYx4KOFus2 + test_vp63: 4 lREP5s+P+lbN + # Uncomment this section to activate devnet setup as specficied in # devnet-setup.md @@ -151,6 +206,61 @@ eca: test_user7: 1 YsWZD4qQmYxo institution_a test_user8: 1 W8G0usrU7jRk bank_a test_user9: 1 H80SiB5ODKKQ 
institution_a + test_user10: 1 n21Dq435t9S1 bank_b + test_user11: 1 6S0UjokSRHYh institution_a + test_user12: 1 dpodq6r2+NPu institution_a + test_user13: 1 9XZFoBjXJ5zM institution_a + test_user14: 1 6lOOiQXW5uXM institution_a + test_user15: 1 PTyW9AVbBSjk institution_a + test_user16: 1 wcaTkxDKsPCM institution_a + test_user17: 1 rbHqY17olLAD institution_a + test_user18: 1 3SYhAns5s729 institution_a + test_user19: 1 1lYX9NoOwgvN institution_a + test_user20: 1 gyDP7aahpqph institution_a + test_user21: 1 PUnCn/RYZdyX institution_a + test_user22: 1 /R9wyC84ioD/ institution_a + test_user23: 1 FQ84KofAsqJY institution_a + test_user24: 1 CbW/9KxEudXk institution_a + test_user25: 1 ZHTavGPdxP4A institution_a + test_user26: 1 XrLtn3pIMhLQ institution_a + test_user27: 1 3kLhEQFA0nyJ institution_a + test_user28: 1 iWnIDbiD728Q institution_a + test_user29: 1 LD6C5gyfXdLU institution_a + test_user30: 1 +GdkSA8cS7tL institution_a + test_user31: 1 +3mp7d2htUKf institution_a + test_user32: 1 78l1M9/ozzys institution_a + test_user33: 1 BeYdR44AdXfz institution_a + test_user34: 1 X58G9QguAS4L institution_a + test_user35: 1 s0Dfy5CHwZBr institution_a + test_user36: 1 LqHPms9dSdE/ institution_a + test_user37: 1 JcEhRJ6Dtj51 institution_a + test_user38: 1 T4VnFcoHgPSi institution_a + test_user39: 1 x3A4yUmHHAKA institution_a + test_user40: 1 5UvWmI8Ouz1K institution_a + test_user41: 1 oZkB+Gs7et1e institution_a + test_user42: 1 TOvOJ4DjZhCA institution_a + test_user43: 1 c28mez0rmYlB institution_a + test_user44: 1 EOthJFMIaZhm institution_a + test_user45: 1 QOcqpN8tl5c+ institution_a + test_user46: 1 alEvm5ZVTwzz institution_a + test_user47: 1 Qxj0YInOv1VZ institution_a + test_user48: 1 VBxJcw0NM/1w institution_a + test_user49: 1 W8OxOUfqgYbO institution_a + test_user50: 1 HcRhRJ7Dtj51 institution_a + test_user51: 1 4SVnfcoHgPSi institution_a + test_user52: 1 x3B4yUmHHAKA institution_a + test_user53: 1 5UvWmI7Uuz1K institution_a + test_user54: 1 oZkB+Gsn6t1e institution_a + test_user55: 1 TOhOJD1jZhCA institution_a + test_user56: 1 c45mez1rmYlB institution_a + test_user57: 1 EOthJMJIaZhm institution_a + test_user58: 1 QOcqpt9tl5c+ institution_a + test_user59: 1 aHEvm5ZVTwzz institution_a + test_user60: 1 nHj0YInOv1VZ institution_a + test_user61: 1 xJcw0NM+1wZD institution_a + test_user62: 1 PO+OUfqgYHdO institution_a + test_user63: 1 KiH09Pdq+ieO institution_a + test_nvp0: 2 iywrPBDEPl0K bank_a test_nvp1: 2 DcYXuRSocuqd institution_a
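
A minimal usage sketch for the new bdd_remote_util helpers, assuming context.remote_ip and context.tls are populated by the behave environment setup; the step wording below is illustrative only and is not part of this patch:

# Illustrative sketch, not included in this patch. Assumes context.remote_ip
# and context.tls are set by environment.py before the step runs.
from behave import given

import bdd_remote_util
from bdd_test_util import bdd_log

@given(u'I restart peer "{peer}" on the remote network')
def step_impl(context, peer):
    # Stop and restart the node through the remote REST API, then log its status.
    bdd_remote_util.stopNode(context, peer)
    bdd_remote_util.restartNode(context, peer)
    status = bdd_remote_util.getNodeStatus(context, peer)
    bdd_log("Status for {0}: {1}".format(peer, status.text))

Each helper calls getNetwork(context) first, which caches context.network_id after the initial lookup, so repeated node operations within a scenario reuse the same network ID.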