From 8343f83d135e0fda6a4a82bd0242d4bd2bb949e4 Mon Sep 17 00:00:00 2001 From: Ciju John Date: Fri, 16 Feb 2018 17:20:56 -0600 Subject: [PATCH] Import (from the Dawn2x branch) a script for testing malicious producers. Import updates to other test scripts. --- tests/CMakeLists.txt | 1 + .../consensusValidationMaliciousProducers.py | 414 ++++++++++++++++++ tests/distributed-transactions-remote-test.py | 6 +- tests/distributed-transactions-test.py | 6 +- tests/eosiod_run_remote_test.py | 11 +- tests/eosiod_run_test.py | 12 +- tests/restart-scenarios-test.py | 11 +- tests/testUtils.py | 152 +++++-- 8 files changed, 563 insertions(+), 50 deletions(-) create mode 100755 tests/consensusValidationMaliciousProducers.py diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index d0ccf9175..fc1d5782f 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -69,6 +69,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/restart-scenarios-test.py ${CMAKE_CUR configure_file(${CMAKE_CURRENT_SOURCE_DIR}/testUtils.py ${CMAKE_CURRENT_BINARY_DIR}/testUtils.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/eosiod_run_test.py ${CMAKE_CURRENT_BINARY_DIR}/eosiod_run_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/eosiod_run_remote_test.py ${CMAKE_CURRENT_BINARY_DIR}/eosiod_run_remote_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/consensusValidationMaliciousProducers.py ${CMAKE_CURRENT_BINARY_DIR}/consensusValidationMaliciousProducers.py COPYONLY) add_test(chain_test chain_test --report_level=detailed) add_test(NAME eosiod_run_test COMMAND tests/eosiod_run_test.py -v --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) diff --git a/tests/consensusValidationMaliciousProducers.py b/tests/consensusValidationMaliciousProducers.py new file mode 100755 index 000000000..751cc4910 --- /dev/null +++ b/tests/consensusValidationMaliciousProducers.py @@ -0,0 +1,414 @@ +#!/usr/bin/python3 + +import testUtils + +import argparse +import signal +from collections import namedtuple +import os +import shutil + +############################################################### +# Test for validating consensus-based block production. We introduce malicious producers that +# reject all transactions. +# We have three test scenarios: +# - No malicious producers. Transactions should be incorporated into the chain. +# - Minority of malicious producers (fewer than a third of the producer count). Transactions will get +# incorporated into the chain, as the majority approves the transactions. +# - Majority of malicious producers (more than a third of the producer count). Transactions won't get +# incorporated into the chain, as the majority rejects the transactions.
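+# A producer is made malicious by appending "trans-execution-time = 0" to its node's config +# (the zeroExecTime snippet below): every transaction then exceeds its allotted execution time, +# so that node rejects it with "allocated processing time was exceeded".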
+############################################################### + + +Print=testUtils.Utils.Print + +StagedNodeInfo=namedtuple("StagedNodeInfo", "config logging") + + +logging00="""{ + "includes": [], + "appenders": [{ + "name": "stderr", + "type": "console", + "args": { + "stream": "std_error", + "level_colors": [{ + "level": "debug", + "color": "green" + },{ + "level": "warn", + "color": "brown" + },{ + "level": "error", + "color": "red" + } + ] + }, + "enabled": true + },{ + "name": "stdout", + "type": "console", + "args": { + "stream": "std_out", + "level_colors": [{ + "level": "debug", + "color": "green" + },{ + "level": "warn", + "color": "brown" + },{ + "level": "error", + "color": "red" + } + ] + }, + "enabled": true + },{ + "name": "net", + "type": "gelf", + "args": { + "endpoint": "10.160.11.21:12201", + "host": "testnet_00" + }, + "enabled": true + } + ], + "loggers": [{ + "name": "default", + "level": "debug", + "enabled": true, + "additivity": false, + "appenders": [ + "stderr", + "net" + ] + } + ] +}""" + +config00="""genesis-json = ./genesis.json +block-log-dir = blocks +readonly = 0 +send-whole-blocks = true +shared-file-dir = blockchain +shared-file-size = 8192 +http-server-address = 127.0.0.1:8888 +p2p-listen-endpoint = 0.0.0.0:9876 +p2p-server-address = localhost:9876 +allowed-connection = any +p2p-peer-address = localhost:9877 +required-participation = true +private-key = ["EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV","5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"] +producer-name = initu +plugin = eosio::producer_plugin +plugin = eosio::chain_api_plugin +plugin = eosio::account_history_plugin +plugin = eosio::account_history_api_plugin""" + + +config01="""genesis-json = ./genesis.json +block-log-dir = blocks +readonly = 0 +send-whole-blocks = true +shared-file-dir = blockchain +shared-file-size = 8192 +http-server-address = 127.0.0.1:8889 +p2p-listen-endpoint = 0.0.0.0:9877 +p2p-server-address = localhost:9877 +allowed-connection = any +p2p-peer-address = localhost:9876 +required-participation = true +private-key = ["EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV","5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"] +producer-name = initb +plugin = eosio::producer_plugin +plugin = eosio::chain_api_plugin +plugin = eosio::account_history_plugin +plugin = eosio::account_history_api_plugin""" + + +producers="""producer-name = initd +producer-name = initf +producer-name = inith +producer-name = initj +producer-name = initl +producer-name = initn +producer-name = initp +producer-name = initr +producer-name = initt +producer-name = inita +producer-name = initc +producer-name = inite +producer-name = initg +producer-name = initi +producer-name = initk +producer-name = initm +producer-name = inito +producer-name = initq +producer-name = inits""" + +zeroExecTime="trans-execution-time = 0" + +def getNoMaliciousStagedNodesInfo(): + stagedNodesInfo=[] + myConfig00=config00 + stagedNodesInfo.append(StagedNodeInfo(myConfig00, logging00)) + myConfig01=config01+"\n"+producers + stagedNodesInfo.append(StagedNodeInfo(myConfig01, logging00)) + return stagedNodesInfo + +def getMinorityMaliciousProducerStagedNodesInfo(): + stagedNodesInfo=[] + myConfig00=config00+"\n"+producers + stagedNodesInfo.append(StagedNodeInfo(myConfig00, logging00)) + myConfig01=config01+"\n"+zeroExecTime + stagedNodesInfo.append(StagedNodeInfo(myConfig01, logging00)) + return stagedNodesInfo + +def getMajorityMaliciousProducerStagedNodesInfo(): + stagedNodesInfo=[] + myConfig00=config00 + 
stagedNodesInfo.append(StagedNodeInfo(myConfig00, logging00)) + myConfig01=config01+"\n"+producers+"\n"+zeroExecTime + stagedNodesInfo.append(StagedNodeInfo(myConfig01, logging00)) + return stagedNodesInfo + +stagingDir="staging" +def stageScenario(stagedNodeInfos): + assert(stagedNodeInfos is not None) + assert(len(stagedNodeInfos) > 1) + + os.makedirs(stagingDir) + count=0 + for stagedNodeInfo in stagedNodeInfos: + dataPath=os.path.join(stagingDir, "tn_data_%02d" % (count)) + os.makedirs(dataPath) + with open(os.path.join(dataPath, "config.ini"), "w") as textFile: + print(stagedNodeInfo.config,file=textFile) + with open(os.path.join(dataPath, "logging.json"), "w") as textFile: + print(stagedNodeInfo.logging,file=textFile) + count += 1 + return + +def cleanStaging(): + os.path.exists(stagingDir) and shutil.rmtree(stagingDir) + + +def errorExit(msg="", errorCode=1): + Print("ERROR:", msg) + exit(errorCode) + +def error(msg="", errorCode=1): + Print("ERROR:", msg) + +parser = argparse.ArgumentParser() +tests=[1,2,3] + +parser.add_argument("-t", "--tests", type=str, help="1|2|3. 1=no malicious producers test, 2=minority malicious test, 3=majority malicious test.", default=None) +parser.add_argument("-w", type=int, help="system wait time in seconds", default=testUtils.Utils.systemWaitTimeout) +parser.add_argument("-v", help="verbose logging", action='store_true') +parser.add_argument("--dumpErrorDetails", + help="Upon error print tn_data_*/config.ini and tn_data_*/stderr.log to stdout", + action='store_true') +parser.add_argument("--keepLogs", help="Don't delete tn_data_* folders upon test completion", + action='store_true') +parser.add_argument("--not-noon", help="This is not the Noon branch.", action='store_true') +parser.add_argument("--dontKill", help="Leave cluster running after test finishes", action='store_true') + +args = parser.parse_args() +testsArg=args.tests +debug=args.v +waitTimeout=args.w +dumpErrorDetails=args.dumpErrorDetails +keepLogs=args.keepLogs +amINoon=not args.not_noon +killEosInstances= not args.dontKill +killWallet= not args.dontKill + +testUtils.Utils.Debug=debug + +assert (testsArg is None or testsArg == "1" or testsArg == "2" or testsArg == "3") +if testsArg is not None: + tests=[int(testsArg)] + +testUtils.Utils.setSystemWaitTimeout(waitTimeout) +if not amINoon: + testUtils.Utils.iAmNotNoon() + +def myTest(transWillEnterBlock): + testSuccessful=False + + cluster=testUtils.Cluster(walletd=True, staging=True) + walletMgr=testUtils.WalletMgr(True) + + try: + cluster.killall() + cluster.cleanup() + walletMgr.killall() + walletMgr.cleanup() + + pnodes=2 + total_nodes=pnodes + topo="mesh" + delay=0 + Print("Stand up cluster") + if cluster.launch(pnodes, total_nodes, topo, delay) is False: + error("Failed to stand up eos cluster.") + return False + + accounts=testUtils.Cluster.createAccountKeys(1) + if accounts is None: + error("FAILURE - create keys") + return False + currencyAccount=accounts[0] + currencyAccount.name="currency" + + Print("Stand up walletd") + if walletMgr.launch() is False: + error("Failed to stand up eos walletd.") + return False + + testWalletName="test" + Print("Creating wallet \"%s\"." % (testWalletName)) + testWallet=walletMgr.create(testWalletName) + if testWallet is None: + error("Failed to create wallet %s." % (testWalletName)) + return False + + for account in accounts: + Print("Importing keys for account %s into wallet %s."
% (account.name, testWallet.name)) + if not walletMgr.importKey(account, testWallet): + error("Failed to import key for account %s" % (account.name)) + return False + + node=cluster.getNode(0) + node2=cluster.getNode(1) + if node is None or node2 is None: + error("Cluster in bad state, received None node") + return False + + initaAccount=testUtils.Cluster.initaAccount + + Print("Importing keys for account %s into wallet %s." % (initaAccount.name, testWallet.name)) + if not walletMgr.importKey(initaAccount, testWallet): + error("Failed to import key for account %s" % (initaAccount.name)) + return False + + Print("Create new account %s via %s" % (currencyAccount.name, initaAccount.name)) + transId=node.createAccount(currencyAccount, initaAccount, stakedDeposit=5000, waitForTransBlock=True) + if transId is None: + error("Failed to create account %s" % (currencyAccount.name)) + return False + + wastFile="contracts/currency/currency.wast" + abiFile="contracts/currency/currency.abi" + Print("Publish contract") + trans=node.publishContract(currencyAccount.name, wastFile, abiFile, waitForTransBlock=True) + if trans is None: + error("Failed to publish contract.") + return False + + Print("push transfer action to currency contract") + contract="currency" + action="transfer" + data="{\"from\":\"currency\",\"to\":\"inita\",\"quantity\":" + if amINoon: + data +="\"00.0050 CUR\",\"memo\":\"test\"}" + else: + data +="50}" + opts="--permission currency@active" + if not amINoon: + opts += " --scope currency,inita" + + trans=node.pushMessage(contract, action, data, opts, silentErrors=True) + transInBlock=False + if not trans[0]: + # On slower systems, e.g. Travis, the transaction rejection can happen immediately. + # We want to handle fast and slow failures. + if "allocated processing time was exceeded" in trans[1]: + Print("Push message transaction immediately failed.") + else: + error("Exception in push message. %s" % (trans[1])) + return False + + else: + transId=testUtils.Node.getTransId(trans[1]) + + Print("verify transaction exists") + if not node2.waitForTransIdOnNode(transId): + error("Transaction never made it to node2") + return False + + Print("Get details for transaction %s" % (transId)) + transaction=node2.getTransaction(transId) + signature=transaction["transaction"]["signatures"][0] + + blockNum=int(transaction["transaction"]["ref_block_num"]) + blockNum += 1 + Print("Our transaction is in block %d" % (blockNum)) + + block=node2.getBlock(blockNum) + cycles=block["cycles"] + if len(cycles) > 0: + blockTransSignature=cycles[0][0]["user_input"][0]["signatures"][0] + # Print("Transaction signature: %s\nBlock transaction signature: %s" % + # (signature, blockTransSignature)) + transInBlock=(signature == blockTransSignature) + + if transWillEnterBlock: + if not transInBlock: + error("Transaction did not enter the chain.") + return False + else: + Print("SUCCESS: Transaction1 entered the chain.") + else: + if transInBlock: + error("Transaction entered the chain.") + return False + else: + Print("SUCCESS: Transaction2 did not enter the chain.") + + testSuccessful=True + finally: + if not testSuccessful and dumpErrorDetails: + cluster.dumpErrorDetails() + walletMgr.dumpErrorDetails() + Print("== Errors see above ==") + + if killEosInstances: + Print("Shut down the cluster%s" % (" and cleanup."
if (testSuccessful and not keepLogs) else ".")) + cluster.killall() + walletMgr.killall() + if testSuccessful and not keepLogs: + Print("Cleanup cluster and wallet data.") + cluster.cleanup() + walletMgr.cleanup() + + return True + + +try: + if 1 in tests: + Print("Cluster with no malicious producers. All producers are expected to approve the transaction; hence the transaction is expected to enter the chain.") + cleanStaging() + stageScenario(getNoMaliciousStagedNodesInfo()) + if not myTest(True): + exit(1) + + if 2 in tests: + Print("\nCluster with a minority (1) of malicious producers. The majority of producers are expected to approve the transaction; hence the transaction is expected to enter the chain.") + cleanStaging() + stageScenario(getMinorityMaliciousProducerStagedNodesInfo()) + if not myTest(True): + exit(1) + + if 3 in tests: + Print("\nCluster with a majority (20) of malicious producers. The malicious majority is expected to block the transaction; hence the transaction is not expected to enter the chain.") + cleanStaging() + stageScenario(getMajorityMaliciousProducerStagedNodesInfo()) + if not myTest(False): + exit(1) + +finally: + cleanStaging() + +exit(0) + diff --git a/tests/distributed-transactions-remote-test.py b/tests/distributed-transactions-remote-test.py index c960234a5..bb7451f1d 100755 --- a/tests/distributed-transactions-remote-test.py +++ b/tests/distributed-transactions-remote-test.py @@ -17,6 +17,7 @@ parser = argparse.ArgumentParser() parser.add_argument("-p", type=int, help="producing nodes count", default=pnodes) parser.add_argument("-v", help="verbose", action='store_true') parser.add_argument("--nodes-file", type=str, help="File containing nodes info in JSON format.", default=nodesFile) +parser.add_argument("--not-noon", help="This is not the Noon branch.", action='store_true') parser.add_argument("--dump-error-details", help="Upon error print tn_data_*/config.ini and tn_data_*/stderr.log to stdout", action='store_true') @@ -25,6 +26,7 @@ args = parser.parse_args() pnodes=args.p nodesFile=args.nodes_file debug=args.v +amINoon=not args.not_noon dumpErrorDetails=args.dump_error_details testUtils.Utils.Debug=debug @@ -36,6 +38,8 @@ total_nodes=pnodes actualTest="tests/distributed-transactions-test.py" testSuccessful=False +if not amINoon: + testUtils.Utils.iAmNotNoon() cluster=testUtils.Cluster() try: @@ -54,7 +58,7 @@ try: if not cluster.waitOnClusterBlockNumSync(3): errorExit("Cluster never stabilized") - cmd="%s --nodes-file %s %s" % (actualTest, nodesFile, "-v" if debug else "") + cmd="%s --nodes-file %s %s %s" % (actualTest, nodesFile, "-v" if debug else "", "" if amINoon else "--not-noon") Print("Starting up distributed transactions test: %s" % (actualTest)) Print("cmd: %s\n" % (cmd)) if 0 != subprocess.call(cmd, shell=True): diff --git a/tests/distributed-transactions-test.py b/tests/distributed-transactions-test.py index 938fa25aa..e2edb7d31 100755 --- a/tests/distributed-transactions-test.py +++ b/tests/distributed-transactions-test.py @@ -21,6 +21,7 @@ parser.add_argument("-s", type=str, help="topology", default="mesh") parser.add_argument("-v", help="verbose", action='store_true') parser.add_argument("--nodes-file", type=str, help="File containing nodes info in JSON format.") parser.add_argument("--seed", type=int, help="random seed", default=seed) +parser.add_argument("--not-noon", help="This is not the Noon branch.", action='store_true') parser.add_argument("--dump-error-details", help="Upon error print tn_data_*/config.ini and tn_data_*/stderr.log to stdout", action='store_true') @@ -33,6 +34,7 @@ total_nodes
= pnodes if args.n == 0 else args.n debug=args.v nodesFile=args.nodes_file seed=args.seed +amINoon=not args.not_noon dumpErrorDetails=args.dump_error_details killWallet=True @@ -43,6 +45,9 @@ if nodesFile is not None: testUtils.Utils.Debug=debug testSuccessful=False +if not amINoon: + testUtils.Utils.iAmNotNoon() + random.seed(seed) # Use a fixed seed for repeatability. cluster=testUtils.Cluster(walletd=True) walletMgr=testUtils.WalletMgr(True) @@ -52,7 +57,6 @@ try: Print("Stand up walletd") if walletMgr.launch() is False: - cmdError("%s" % (WalletdName)) errorExit("Failed to stand up eos walletd.") if nodesFile is not None: diff --git a/tests/eosiod_run_remote_test.py b/tests/eosiod_run_remote_test.py index c1b6bd2af..5d88b139a 100755 --- a/tests/eosiod_run_remote_test.py +++ b/tests/eosiod_run_remote_test.py @@ -13,12 +13,14 @@ def errorExit(msg="", errorCode=1): parser = argparse.ArgumentParser() parser.add_argument("-v", help="verbose", action='store_true') +parser.add_argument("--not-noon", help="This is not the Noon branch.", action='store_true') parser.add_argument("--dump-error-details", help="Upon error print tn_data_*/config.ini and tn_data_*/stderr.log to stdout", action='store_true') args = parser.parse_args() debug=args.v +amINoon=not args.not_noon dumpErrorDetails=args.dump_error_details testUtils.Utils.Debug=debug @@ -29,8 +31,13 @@ delay=1 pnodes=1 total_nodes=pnodes actualTest="tests/eosiod_run_test.py" +if not amINoon: + actualTest="tests/eosd_run_test.py" testSuccessful=False +if not amINoon: + testUtils.Utils.iAmNotNoon() + cluster=testUtils.Cluster() try: Print("BEGIN") @@ -48,8 +55,8 @@ try: if not cluster.waitOnClusterBlockNumSync(3): errorExit("Cluster never stabilized") - cmd="%s --dont-launch --exit-early %s" % (actualTest, "-v" if debug else "") - Print("Starting up eosiod test: %s" % (actualTest)) + cmd="%s --dont-launch %s %s" % (actualTest, "-v" if debug else "", "" if amINoon else "--not-noon") + Print("Starting up %s test: %s" % ("eosiod" if amINoon else "eosd", actualTest)) Print("cmd: %s\n" % (cmd)) if 0 != subprocess.call(cmd, shell=True): errorExit("failed to run cmd.") diff --git a/tests/eosiod_run_test.py b/tests/eosiod_run_test.py index 9888facdf..1cc2f0ee0 100755 --- a/tests/eosiod_run_test.py +++ b/tests/eosiod_run_test.py @@ -458,13 +458,19 @@ try: Print("push transfer action to currency contract") contract="currency" action="transfer" - data="{\"from\":\"currency\",\"to\":\"inita\",\"quantity\":\"00.0050 CUR\",\"memo\":\"test\"}" + data="{\"from\":\"currency\",\"to\":\"inita\",\"quantity\":" + if amINoon: + data +="\"00.0050 CUR\",\"memo\":\"test\"}" + else: + data +="50}" opts="--permission currency@active" + if not amINoon: + opts += " --scope currency,inita" trans=node.pushMessage(contract, action, data, opts) - if trans is None: + if not trans[0]: cmdError("%s push message currency transfer" % (ClientName)) errorExit("Failed to push message to currency contract") - transId=testUtils.Node.getTransId(trans) + transId=testUtils.Node.getTransId(trans[1]) Print("verify transaction exists") if not node.waitForTransIdOnNode(transId): diff --git a/tests/restart-scenarios-test.py b/tests/restart-scenarios-test.py index 79e08a94b..f982b08ca 100755 --- a/tests/restart-scenarios-test.py +++ b/tests/restart-scenarios-test.py @@ -22,7 +22,7 @@ import signal ############################################################### -DefaultKillPercent=50 +DefaultKillPercent=25 Print=testUtils.Utils.Print def errorExit(msg="", errorCode=1): @@ -41,6 +41,7 @@ 
parser.add_argument("--killSig", type=str, help="kill signal[%s|%s]" % parser.add_argument("--killCount", type=int, help="eosiod instances to kill", default=-1) parser.add_argument("-v", help="verbose logging", action='store_true') parser.add_argument("--dontKill", help="Leave cluster running after test finishes", action='store_true') +parser.add_argument("--not-noon", help="This is not the Noon branch.", action='store_true') parser.add_argument("--dumpErrorDetails", help="Upon error print tn_data_*/config.ini and tn_data_*/stderr.log to stdout", action='store_true') @@ -54,14 +55,18 @@ delay=args.d chainSyncStrategyStr=args.c debug=args.v total_nodes = pnodes -killCount=args.killCount if args.killCount > 0 else int((DefaultKillPercent/100.0)*total_nodes) +killCount=args.killCount if args.killCount > 0 else int(round((DefaultKillPercent/100.0)*total_nodes)) killSignal=args.killSig killEosInstances= not args.dontKill dumpErrorDetails=args.dumpErrorDetails keepLogs=args.keepLogs +amINoon=not args.not_noon testUtils.Utils.Debug=debug +if not amINoon: + testUtils.Utils.iAmNotNoon() + Print ("producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d, chain sync strategy: %s" % ( pnodes, topo, delay, chainSyncStrategyStr)) @@ -148,7 +153,7 @@ finally: if not testSuccessful and dumpErrorDetails: cluster.dumpErrorDetails() walletMgr.dumpErrorDetails() - Utils.Print("== Errors see above ==") + Print("== Errors see above ==") if killEosInstances: Print("Shut down the cluster%s" % (" and cleanup." if (testSuccessful and not keepLogs) else ".")) diff --git a/tests/testUtils.py b/tests/testUtils.py index 87d85f3cc..45917fa2c 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -23,11 +23,13 @@ class Utils: Debug=False FNull = open(os.devnull, 'w') - EosServerName="eosiod" EosClientPath="programs/eosioc/eosioc" - EosWalletPath="programs/eosiowd/eosiowd" + + EosWalletName="eosiowd" + EosWalletPath="programs/eosiowd/"+ EosWalletName + EosServerName="eosiod" - EosServerPath="programs/eosiod/%s" % (EosServerName) + EosServerPath="programs/eosiod/"+ EosServerName EosLauncherPath="programs/eosio-launcher/eosio-launcher" MongoPath="mongo" @@ -48,6 +50,8 @@ class Utils: SigKillTag="kill" SigTermTag="term" + systemWaitTimeout=60 + # mongoSyncTime: eosiod mongodb plugin seems to sync with a 10-15 seconds delay. 
This will inject # a wait period before the 2nd DB check (if first check fails) mongoSyncTime=25 @@ -57,15 +61,23 @@ class Utils: @staticmethod def iAmNotNoon(): Utils.amINoon=False - Utils.EosServerName="eosd" + Utils.EosClientPath="programs/eosc/eosc" - Utils.EosWalletPath="programs/eos-walletd/eos-walletd" - Utils.EosServerPath="programs/eosd/%s" % (Utils.EosServerName) + + Utils.EosWalletName="eos-walletd" + Utils.EosWalletPath="programs/eos-walletd/"+ Utils.EosWalletName + + Utils.EosServerName="eosd" + Utils.EosServerPath="programs/eosd/"+ Utils.EosServerName @staticmethod def setMongoSyncTime(syncTime): Utils.mongoSyncTime=syncTime - + + @staticmethod + def setSystemWaitTimeout(timeout): + Utils.systemWaitTimeout=timeout + @staticmethod def getChainStrategies(): chainSyncStrategies={} @@ -81,6 +93,12 @@ class Utils: return chainSyncStrategies + @staticmethod + def checkOutput(cmd): + assert(isinstance(cmd, list)) + retStr=subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8") + return retStr + @staticmethod def errorExit(msg="", raw=False, errorCode=1): Utils.Print("ERROR:" if not raw else "", msg) @@ -140,7 +158,7 @@ class Node(object): @staticmethod def runCmdReturnJson(cmd, trace=False): - retStr=Node.__checkOutput(cmd.split()) + retStr=Utils.checkOutput(cmd.split()) jStr=Node.filterJsonObject(retStr) trace and Utils.Print ("RAW > %s"% retStr) trace and Utils.Print ("JSON> %s"% jStr) @@ -149,7 +167,7 @@ class Node(object): @staticmethod def __runCmdArrReturnJson(cmdArr, trace=False): - retStr=Node.__checkOutput(cmdArr) + retStr=Utils.checkOutput(cmdArr) jStr=Node.filterJsonObject(retStr) trace and Utils.Print ("RAW > %s"% retStr) trace and Utils.Print ("JSON> %s"% jStr) @@ -220,6 +238,7 @@ class Node(object): @staticmethod def getTransId(trans): + #Utils.Print("%s" % trans) transId=trans["transaction_id"] return transId @@ -259,6 +278,7 @@ class Node(object): if not retry: break if self.mongoSyncTime is not None: + Utils.Debug and Utils.Print("cmd: sleep %d seconds" % (self.mongoSyncTime)) time.sleep(self.mongoSyncTime) return None @@ -280,17 +300,18 @@ class Node(object): if not retry: break if self.mongoSyncTime is not None: + Utils.Debug and Utils.Print("cmd: sleep %d seconds" % (self.mongoSyncTime)) time.sleep(self.mongoSyncTime) return None def doesNodeHaveBlockNum(self, blockNum): - if self.alive is False: - return False + assert isinstance(blockNum, int) - block=self.getBlock(blockNum, silentErrors=True) - if block is None: + info=self.getInfo(silentErrors=True) + last_irreversible_block_num=int(info["last_irreversible_block_num"]) + if blockNum > last_irreversible_block_num: return False else: return True @@ -323,6 +344,7 @@ class Node(object): if not retry: break if self.mongoSyncTime is not None: + Utils.Debug and Utils.Print("cmd: sleep %d seconds" % (self.mongoSyncTime)) time.sleep(self.mongoSyncTime) return None @@ -344,6 +366,7 @@ class Node(object): if not retry: break if self.mongoSyncTime is not None: + Utils.Debug and Utils.Print("cmd: sleep %d seconds" % (self.mongoSyncTime)) time.sleep(self.mongoSyncTime) return None @@ -367,6 +390,7 @@ class Node(object): if not retry: break if self.mongoSyncTime is not None: + Utils.Debug and Utils.Print("cmd: sleep %d seconds" % (self.mongoSyncTime)) time.sleep(self.mongoSyncTime) return None @@ -388,18 +412,25 @@ class Node(object): if not retry: break if self.mongoSyncTime is not None: + Utils.Debug and Utils.Print("cmd: sleep %d seconds" % (self.mongoSyncTime)) time.sleep(self.mongoSyncTime) return None - 
def doesNodeHaveTransId(self, transId): trans=self.getTransaction(transId, silentErrors=True) - if trans is not None: - return True - else: + if trans is None: return False + blockNum=None + if not self.enableMongo: + blockNum=int(trans["transaction"]["data"]["ref_block_num"]) + else: + blockNum=int(trans["ref_block_num"]) + + blockNum += 1 + return self.doesNodeHaveBlockNum(blockNum) + def createInitAccounts(self): eosio = copy.copy(Cluster.initaAccount) eosio.name = "eosio" @@ -418,7 +449,7 @@ class Node(object): data="{\"to\":\"eosio\",\"quantity\":\"1000000000.0000 EOS\"}" opts="--permission eosio@active" trans=self.pushMessage(contract, action, data, opts) - transId=Node.getTransId(trans) + transId=Node.getTransId(trans[1]) self.waitForTransIdOnNode(transId) initx = copy.copy(Cluster.initaAccount) @@ -445,7 +476,7 @@ class Node(object): cmd="%s %s create account %s %s %s %s" % (Utils.EosClientPath, self.endpointArgs, creatorAccount.name, account.name, account.ownerPublicKey, account.activePublicKey) - + Utils.Debug and Utils.Print("cmd: %s" % (cmd)) trans=None try: @@ -522,48 +553,61 @@ class Node(object): return None return ret if self.mongoSyncTime is not None: + Utils.Debug and Utils.Print("cmd: sleep %d seconds" % (self.mongoSyncTime)) time.sleep(self.mongoSyncTime) return None - def waitForBlockNumOnNode(self, blockNum, timeout=60): + def waitForBlockNumOnNode(self, blockNum, timeout=None): + if timeout is None: + timeout=Utils.systemWaitTimeout startTime=time.time() remainingTime=timeout + Utils.Debug and Utils.Print("cmd: remaining time %d seconds" % (remainingTime)) while time.time()-startTime < timeout: if self.doesNodeHaveBlockNum(blockNum): return True sleepTime=3 if remainingTime > 3 else (3 - remainingTime) remainingTime -= sleepTime + Utils.Debug and Utils.Print("cmd: sleep %d seconds" % (sleepTime)) time.sleep(sleepTime) return False - def waitForTransIdOnNode(self, transId, timeout=60): + def waitForTransIdOnNode(self, transId, timeout=None): + if timeout is None: + timeout=Utils.systemWaitTimeout startTime=time.time() remainingTime=timeout + Utils.Debug and Utils.Print("cmd: remaining time %d seconds" % (remainingTime)) while time.time()-startTime < timeout: if self.doesNodeHaveTransId(transId): return True sleepTime=3 if remainingTime > 3 else (3 - remainingTime) remainingTime -= sleepTime + Utils.Debug and Utils.Print("cmd: sleep %d seconds" % (sleepTime)) time.sleep(sleepTime) return False - def waitForNextBlock(self, timeout=60): + def waitForNextBlock(self, timeout=None): + if timeout is None: + timeout=Utils.systemWaitTimeout startTime=time.time() remainingTime=timeout - num=self.getHeadBlockNum() + Utils.Debug and Utils.Print("cmd: remaining time %d seconds" % (remainingTime)) + num=self.getIrreversibleBlockNum() Utils.Debug and Utils.Print("Current block number: %s" % (num)) while time.time()-startTime < timeout: - nextNum=self.getHeadBlockNum() + nextNum=self.getIrreversibleBlockNum() if nextNum > num: Utils.Debug and Utils.Print("Next block number: %s" % (nextNum)) return True sleepTime=.5 if remainingTime > .5 else (.5 - remainingTime) remainingTime -= sleepTime + Utils.Debug and Utils.Print("cmd: sleep %d seconds" % (sleepTime)) time.sleep(sleepTime) return False @@ -652,6 +696,7 @@ class Node(object): return balance else: if self.mongoSyncTime is not None: + Utils.Debug and Utils.Print("cmd: sleep %d seconds" % (self.mongoSyncTime)) time.sleep(self.mongoSyncTime) account=self.getEosAccountFromDb(name) @@ -689,7 +734,7 @@ class Node(object): cmd="%s %s get 
code %s" % (Utils.EosClientPath, self.endpointArgs, account) Utils.Debug and Utils.Print("cmd: %s" % (cmd)) try: - retStr=Node.__checkOutput(cmd.split()) + retStr=Utils.checkOutput(cmd.split()) #Utils.Print ("get code> %s"% retStr) p=re.compile('code\shash: (\w+)\n', re.MULTILINE) m=p.search(retStr) @@ -788,7 +833,8 @@ class Node(object): keys=list(row.keys()) return keys - def pushMessage(self, contract, action, data, opts): + # returns tuple with transaction and + def pushMessage(self, contract, action, data, opts, silentErrors=False): cmd=None if Utils.amINoon: cmd="%s %s push action %s %s" % (Utils.EosClientPath, self.endpointArgs, contract, action) @@ -802,11 +848,12 @@ class Node(object): Utils.Debug and Utils.Print("cmd: %s" % (cmdArr)) try: trans=Node.__runCmdArrReturnJson(cmdArr) - return trans + return (True, trans) except subprocess.CalledProcessError as ex: msg=ex.output.decode("utf-8") - Utils.Print("ERROR: Exception during push message. %s" % (msg)) - return None + if not silentErrors: + Utils.Print("ERROR: Exception during push message. %s" % (msg)) + return (False, msg) def setPermission(self, account, code, pType, requirement, waitForTransBlock=False): cmd="%s %s set action permission %s %s %s %s" % ( @@ -870,7 +917,19 @@ class Node(object): blockNum=block["block_num"] return blockNum return None - + + def getIrreversibleBlockNum(self): + if not self.enableMongo: + info=self.getInfo() + if info is not None: + return info["last_irreversible_block_num"] + else: + block=self.getBlockFromDb(-1) + if block is not None: + blockNum=block["block_num"] + return blockNum + return None + ########################################################################################### Wallet=namedtuple("Wallet", "name password host port") @@ -972,7 +1031,7 @@ class WalletMgr(object): def unlockWallet(self, wallet): cmd="%s %s wallet unlock --name %s" % (Utils.EosClientPath, self.endpointArgs, wallet.name) - #Utils.Debug and Utils.Print("cmd: %s" % (cmd)) + Utils.Debug and Utils.Print("cmd: %s" % (cmd)) popen=subprocess.Popen(cmd.split(), stdout=Utils.FNull, stdin=subprocess.PIPE) outs, errs = popen.communicate(input=wallet.password.encode("utf-8")) if 0 != popen.wait(): @@ -1032,8 +1091,9 @@ class WalletMgr(object): shutil.copyfileobj(f, sys.stdout) def killall(self): - if self.__walletPid is not None: - os.kill(self.__walletPid, signal.SIGKILL) + cmd="pkill %s" % (Utils.EosWalletName) + Utils.Debug and Utils.Print("cmd: %s" % (cmd)) + subprocess.call(cmd.split()) def cleanup(self): dataDir=WalletMgr.__walletDataDir @@ -1062,7 +1122,7 @@ class Cluster(object): initbAccount.activePublicKey="EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV"; # walletd [True|False] Is walletd running. 
If not load the wallet plugin - def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888, walletHost="localhost", walletPort=8899, enableMongo=False, mongoHost="localhost", mongoPort=27017, mongoDb="EOStest", initaPrvtKey=initaAccount.ownerPrivateKey, initbPrvtKey=initbAccount.ownerPrivateKey): + def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888, walletHost="localhost", walletPort=8899, enableMongo=False, mongoHost="localhost", mongoPort=27017, mongoDb="EOStest", initaPrvtKey=initaAccount.ownerPrivateKey, initbPrvtKey=initbAccount.ownerPrivateKey, staging=False): self.accounts={} self.nodes={} self.localCluster=localCluster @@ -1087,6 +1147,7 @@ class Cluster(object): self.mongoEndpointArgs += "--host %s --port %d %s" % (mongoHost, mongoPort, mongoDb) Cluster.initaAccount.ownerPrivateKey=initaPrvtKey Cluster.initbAccount.ownerPrivateKey=initbPrvtKey + self.staging=staging def setChainStrategy(self, chainSyncStrategy=Utils.SyncReplayTag): self.__chainSyncStrategy=self.__chainSyncStrategies.get(chainSyncStrategy) @@ -1108,6 +1169,8 @@ class Cluster(object): cmd="%s -p %s -n %s -s %s -d %s" % ( Utils.EosLauncherPath, pnodes, total_nodes, topo, delay) cmdArr=cmd.split() + if self.staging: + cmdArr.append("--nogen") if not self.walletd or self.enableMongo: if Utils.amINoon: cmdArr.append("--eosiod") @@ -1185,8 +1248,11 @@ class Cluster(object): # If a last transaction exists wait for it on root node, then collect its head block number. # Wait on this block number on each cluster node - def waitOnClusterSync(self, timeout=60): + def waitOnClusterSync(self, timeout=None): + if timeout is None: + timeout=Utils.systemWaitTimeout startTime=time.time() + Utils.Debug and Utils.Print("cmd: remaining time %d seconds" % (timeout)) if self.nodes[0].alive is False: Utils.Print("ERROR: Root node is down.") return False; @@ -1205,9 +1271,12 @@ class Cluster(object): currentTimeout=timeout-(time.time()-startTime) return self.waitOnClusterBlockNumSync(targetHeadBlockNum, currentTimeout) - def waitOnClusterBlockNumSync(self, targetHeadBlockNum, timeout=60): + def waitOnClusterBlockNumSync(self, targetHeadBlockNum, timeout=None): + if timeout is None: + timeout=Utils.systemWaitTimeout startTime=time.time() remainingTime=timeout + Utils.Debug and Utils.Print("cmd: remaining time %d seconds" % (remainingTime)) while time.time()-startTime < timeout: synced=True for node in self.nodes: @@ -1221,6 +1290,7 @@ class Cluster(object): #Utils.Debug and Utils.Print("Brief pause to allow nodes to catch up.") sleepTime=3 if remainingTime > 3 else (3 - remainingTime) remainingTime -= sleepTime + Utils.Debug and Utils.Print("cmd: remaining time %d seconds" % (remainingTime)) time.sleep(sleepTime) return False @@ -1299,14 +1369,14 @@ class Cluster(object): for account in accounts: Utils.Print("Importing keys for account %s into wallet %s." 
% (account.name, wallet.name)) if not self.walletMgr.importKey(account, wallet): - Utils.errorExit("Failed to import key for account %s" % (account.name)) + Utils.Print("ERROR: Failed to import key for account %s" % (account.name)) return False self.accounts=accounts return True def getNode(self, id=0): - return self.nodes[0] + return self.nodes[id] def getNodes(self): return self.nodes @@ -1529,9 +1599,11 @@ class Cluster(object): except: pass - def waitForNextBlock(self, timeout=60): + def waitForNextBlock(self, timeout=None): + if timeout is None: + timeout=Utils.systemWaitTimeout node=self.nodes[0] - return node.waitForNextBlock() + return node.waitForNextBlock(timeout) def cleanup(self): for f in glob.glob("tn_data_*"): -- GitLab
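Note: this patch changes Node.pushMessage() to return a (success, value) tuple instead of a transaction or None; value is the parsed transaction JSON on success and the raw eosioc error text on failure. Below is a minimal caller sketch of that contract (illustrative only, not part of the patch; it assumes a built tree, a launchable local cluster, and the currency contract already published, as in the tests above):

import testUtils

# Stand up a small local cluster, as the tests above do.
cluster = testUtils.Cluster(walletd=True)
if cluster.launch(2, 2, "mesh", 0) is False:
    testUtils.Utils.errorExit("Failed to stand up cluster.")
node = cluster.getNode(0)  # getNode(id) now honors its id argument

# Push an action and unpack the new (success, value) tuple. With
# silentErrors=True the error print is suppressed so the caller can
# decide how to report the failure (e.g. the "allocated processing
# time was exceeded" rejection a malicious producer generates).
data = "{\"from\":\"currency\",\"to\":\"inita\",\"quantity\":\"00.0050 CUR\",\"memo\":\"test\"}"
(success, value) = node.pushMessage("currency", "transfer", data, "--permission currency@active", silentErrors=True)
if success:
    transId = testUtils.Node.getTransId(value)
    # waitForTransIdOnNode() now succeeds only once the transaction's assumed
    # block is irreversible (per the reworked doesNodeHaveTransId/doesNodeHaveBlockNum).
    if not node.waitForTransIdOnNode(transId):
        testUtils.Utils.Print("ERROR: transaction %s did not become irreversible" % (transId))
else:
    testUtils.Utils.Print("Push rejected: %s" % (value))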