Commit b305666d authored by Ciju John


Remove remote tests as they are very close duplicates of the target tests. Rename methods to reflect action. Add new sync restart test.
Parent 796db612
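
As background for the method renames in the diffs below, here is a minimal usage sketch in Python (assuming the testUtils module as modified by this commit; cluster setup is elided): waitForTransInBlock replaces waitForTransIdOnNode and returns once the transaction appears in a block, while the new waitForTransFinalization returns only once the containing block has become irreversible.

import testUtils

# Sketch only: `node` is assumed to be a testUtils.Node from an already-launched cluster,
# and `trans` a transaction object returned by one of its push/transfer helpers.
def waitOnTransfer(node, trans):
    transId = testUtils.Node.getTransId(trans)
    # Renamed from waitForTransIdOnNode: true once the transaction appears in some block.
    if not node.waitForTransInBlock(transId):
        return False
    # New in this commit: true only once the containing block is at or below last_irreversible_block_num.
    return node.waitForTransFinalization(transId)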
......@@ -38,8 +38,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/validate-dirty-db.py ${CMAKE_CURRENT_
#To run plugin_test with all log from blockchain displayed, put --verbose after --, i.e. plugin_test -- --verbose
add_test(NAME plugin_test COMMAND plugin_test --report_level=detailed --color_output)
add_test(NAME nodeos_run_test COMMAND tests/nodeos_run_test.py -v --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME nodeos_run_remote_test COMMAND tests/nodeos_run_remote_test.py -v --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME nodeos_run_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME p2p_dawn515_test COMMAND tests/p2p_tests/dawn_515/test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
#if(BUILD_MONGO_DB_PLUGIN)
......@@ -47,13 +46,11 @@ add_test(NAME p2p_dawn515_test COMMAND tests/p2p_tests/dawn_515/test.sh WORKING_
#endif()
# TODO: Tests removed until working again on master.
add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -d 2 -p 1 -n 4 -v --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME distributed-transactions-remote-test COMMAND tests/distributed-transactions-remote-test.py -v --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
# TODO removed on slim: add_test(NAME restart-scenarios-test_resync COMMAND tests/restart-scenarios-test.py -c resync -p4 -v --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
# add_test(NAME restart-scenarios-test_replay COMMAND tests/restart-scenarios-test.py -c replay -p4 -v --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME restart-scenarios-test_hard_replay COMMAND tests/restart-scenarios-test.py -c hardReplay -p4 -v --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -d 2 -p 1 -n 4 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME restart-scenarios-test-resync COMMAND tests/restart-scenarios-test.py -c resync -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME restart-scenarios-test-hard_replay COMMAND tests/restart-scenarios-test.py -c hardReplay -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
# TODO: add_test(NAME consensus-validation-malicious-producers COMMAND tests/consensus-validation-malicious-producers.py -w 80 --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME validate_dirty_db_test COMMAND tests/validate-dirty-db.py -v --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME validate_dirty_db_test COMMAND tests/validate-dirty-db.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
if(ENABLE_COVERAGE_TESTING)
......
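
The --clean-run flags added to the test registrations above depend on matching argparse renames in the scripts below (--dont-kill becomes --leave-running, --kill-all becomes --clean-run). A minimal sketch of how a test script consumes them, following the pattern shown in the following diffs:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--leave-running", help="Leave cluster running after test finishes", action='store_true')
parser.add_argument("--clean-run", help="Kill all nodeos and keosd instances", action='store_true')
args = parser.parse_args()

dontKill = args.leave_running   # was args.dont_kill
killAll = args.clean_run        # was args.kill_all; forwarded to cluster.killall(allInstances=killAll)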
......@@ -333,7 +333,7 @@ def myTest(transWillEnterBlock):
transId=testUtils.Node.getTransId(trans[1])
Print("verify transaction exists")
if not node2.waitForTransIdOnNode(transId):
if not node2.waitForTransInBlock(transId):
error("Transaction never made it to node2")
return False
......
......@@ -7,6 +7,12 @@ import subprocess
import tempfile
import os
###############################################################
# distributed-transactions-remote-test
# Tests the remote capability of the distributed-transactions-test. The test will set up a cluster and pass node info to distributed-transactions-test. E.g.
# distributed-transactions-remote-test.py -v --clean-run --dump-error-detail
###############################################################
Print=testUtils.Utils.Print
def errorExit(msg="", errorCode=1):
......@@ -18,19 +24,19 @@ pnodes=1
parser = argparse.ArgumentParser()
parser.add_argument("-p", type=int, help="producing nodes count", default=pnodes)
parser.add_argument("-v", help="verbose", action='store_true')
parser.add_argument("--dont-kill", help="Leave cluster running after test finishes", action='store_true')
parser.add_argument("--leave-running", help="Leave cluster running after test finishes", action='store_true')
parser.add_argument("--dump-error-details",
help="Upon error print etc/eosio/node_*/config.ini and var/lib/node_*/stderr.log to stdout",
action='store_true')
parser.add_argument("--kill-all", help="Kill all nodeos and kleos instances", action='store_true')
parser.add_argument("--clean-run", help="Kill all nodeos and kleos instances", action='store_true')
args = parser.parse_args()
pnodes=args.p
# nodesFile=args.nodes_file
debug=args.v
dontKill=args.dont_kill
dontKill=args.leave_running
dumpErrorDetails=args.dump_error_details
killAll=args.kill_all
killAll=args.clean_run
testUtils.Utils.Debug=debug
......
......@@ -21,11 +21,11 @@ parser.add_argument("-s", type=str, help="topology", default="mesh")
parser.add_argument("-v", help="verbose", action='store_true')
parser.add_argument("--nodes-file", type=str, help="File containing nodes info in JSON format.")
parser.add_argument("--seed", type=int, help="random seed", default=seed)
parser.add_argument("--dont-kill", help="Leave cluster running after test finishes", action='store_true')
parser.add_argument("--leave-running", help="Leave cluster running after test finishes", action='store_true')
parser.add_argument("--dump-error-details",
help="Upon error print etc/eosio/node_*/config.ini and var/lib/node_*/stderr.log to stdout",
action='store_true')
parser.add_argument("--kill-all", help="Kill all nodeos and kleos instances", action='store_true')
parser.add_argument("--clean-run", help="Kill all nodeos and kleos instances", action='store_true')
args = parser.parse_args()
pnodes=args.p
......@@ -35,9 +35,9 @@ total_nodes = pnodes if args.n == 0 else args.n
debug=args.v
nodesFile=args.nodes_file
seed=args.seed
dontKill=args.dont_kill
dontKill=args.leave_running
dumpErrorDetails=args.dump_error_details
killAll=args.kill_all
killAll=args.clean_run
killWallet=not dontKill
killEosInstances=not dontKill
......
......@@ -5,6 +5,12 @@ import testUtils
import argparse
import subprocess
###############################################################
# nodeos_run_remote_test
# Tests the remote capability of the nodeos_run_test. The test will set up a cluster and pass node info to nodeos_run_test. E.g.
# nodeos_run_remote_test.py -v --clean-run --dump-error-detail
###############################################################
Print=testUtils.Utils.Print
def errorExit(msg="", errorCode=1):
......@@ -13,19 +19,19 @@ def errorExit(msg="", errorCode=1):
parser = argparse.ArgumentParser()
parser.add_argument("-v", help="verbose", action='store_true')
parser.add_argument("--dont-kill", help="Leave cluster running after test finishes", action='store_true')
parser.add_argument("--leave-running", help="Leave cluster running after test finishes", action='store_true')
parser.add_argument("--only-bios", help="Limit testing to bios node.", action='store_true')
parser.add_argument("--dump-error-details",
help="Upon error print etc/eosio/node_*/config.ini and var/lib/node_*/stderr.log to stdout",
action='store_true')
parser.add_argument("--kill-all", help="Kill all nodeos and kleos instances", action='store_true')
parser.add_argument("--clean-run", help="Kill all nodeos and kleos instances", action='store_true')
args = parser.parse_args()
debug=args.v
dontKill=args.dont_kill
dontKill=args.leave_running
dumpErrorDetails=args.dump_error_details
onlyBios=args.only_bios
killAll=args.kill_all
killAll=args.clean_run
testUtils.Utils.Debug=debug
......
......@@ -49,9 +49,9 @@ parser.add_argument("--dont-launch", help="Don't launch own node. Assume node is
parser.add_argument("--keep-logs", help="Don't delete var/lib/node_* folders upon test completion",
action='store_true')
parser.add_argument("-v", help="verbose logging", action='store_true')
parser.add_argument("--dont-kill", help="Leave cluster running after test finishes", action='store_true')
parser.add_argument("--leave-running", help="Leave cluster running after test finishes", action='store_true')
parser.add_argument("--only-bios", help="Limit testing to bios node.", action='store_true')
parser.add_argument("--kill-all", help="Kill all nodeos and kleos instances", action='store_true')
parser.add_argument("--clean-run", help="Kill all nodeos and kleos instances", action='store_true')
args = parser.parse_args()
testOutputFile=args.output
......@@ -64,10 +64,10 @@ defproducerbPrvtKey=args.defproducerb_prvt_key
dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
dontLaunch=args.dont_launch
dontKill=args.dont_kill
dontKill=args.leave_running
prodCount=args.prod_count
onlyBios=args.only_bios
killAll=args.kill_all
killAll=args.clean_run
testUtils.Utils.Debug=debug
localTest=True if server == LOCAL_HOST else False
......@@ -374,7 +374,7 @@ try:
# errorExit("FAILURE - %s servants. Expected: [], Actual: %s" % (
# testeraAccount.name, actualServants), raw=True)
node.waitForTransIdOnNode(transId)
node.waitForTransInBlock(transId)
transaction=None
if not enableMongo:
......@@ -518,7 +518,7 @@ try:
errorExit("Failed to reject duplicate message for currency1111 contract")
Print("verify transaction exists")
if not node.waitForTransIdOnNode(transId):
if not node.waitForTransInBlock(transId):
cmdError("%s get transaction trans_id" % (ClientName))
errorExit("Failed to verify push message transaction id.")
......@@ -748,7 +748,7 @@ try:
Print("CurrentBlockNum: %d" % (currentBlockNum))
Print("Request blocks 1-%d" % (currentBlockNum))
for blockNum in range(1, currentBlockNum+1):
block=node.getBlock(str(blockNum), retry=False, silentErrors=False)
block=node.getBlock(blockNum, retry=False, silentErrors=False)
if block is None:
cmdError("%s get block" % (ClientName))
errorExit("get block by num %d" % blockNum)
......@@ -769,7 +769,7 @@ try:
Print("Request invalid block numbered %d. This will generate an expected error message." % (currentBlockNum+1000))
block=node.getBlock(str(currentBlockNum+1000), silentErrors=True, retry=False)
block=node.getBlock(currentBlockNum+1000, silentErrors=True, retry=False)
if block is not None:
errorExit("ERROR: Received block where not expected")
else:
......
......@@ -59,7 +59,7 @@ class StressNetwork:
if trid is None:
return ([], "", 0.0, "failed to issue currency0000")
print("transaction id %s" % (trid))
node.waitForTransIdOnNode(trid)
node.waitForTransInBlock(trid)
self.trList = []
expBal = 0
......@@ -97,7 +97,7 @@ class StressNetwork:
for tr in self.trList:
trid = node.getTransId(tr)
transIdlist.append(trid)
node.waitForTransIdOnNode(trid)
node.waitForTransInBlock(trid)
return (transIdlist, acc2.name, expBal, "")
def on_exit(self):
......
......@@ -4,6 +4,7 @@ import testUtils
import argparse
import random
import traceback
###############################################################
# Test for different nodes restart scenarios.
......@@ -25,6 +26,7 @@ Print=testUtils.Utils.Print
def errorExit(msg="", errorCode=1):
Print("ERROR:", msg)
traceback.print_stack(limit=-1)
exit(errorCode)
parser = argparse.ArgumentParser()
......@@ -32,19 +34,20 @@ parser.add_argument("-p", type=int, help="producing nodes count", default=2)
parser.add_argument("-d", type=int, help="delay between nodes startup", default=1)
parser.add_argument("-s", type=str, help="topology", default="mesh")
parser.add_argument("-c", type=str, help="chain strategy[%s|%s|%s]" %
(testUtils.Utils.SyncResyncTag, testUtils.Utils.SyncReplayTag, testUtils.Utils.SyncNoneTag),
(testUtils.Utils.SyncResyncTag, testUtils.Utils.SyncNoneTag, testUtils.Utils.SyncHardReplayTag),
default=testUtils.Utils.SyncResyncTag)
parser.add_argument("--kill-sig", type=str, help="kill signal[%s|%s]" %
(testUtils.Utils.SigKillTag, testUtils.Utils.SigTermTag), default=testUtils.Utils.SigKillTag)
parser.add_argument("--kill-count", type=int, help="nodeos instances to kill", default=-1)
parser.add_argument("-v", help="verbose logging", action='store_true')
parser.add_argument("--dont-kill", help="Leave cluster running after test finishes", action='store_true')
parser.add_argument("--leave-running", help="Leave cluster running after test finishes", action='store_true')
parser.add_argument("--dump-error-details",
help="Upon error print etc/eosio/node_*/config.ini and var/lib/node_*/stderr.log to stdout",
action='store_true')
parser.add_argument("--keep-logs", help="Don't delete var/lib/node_* folders upon test completion",
action='store_true')
parser.add_argument("--kill-all", help="Kill all nodeos and kleos instances", action='store_true')
parser.add_argument("--clean-run", help="Kill all nodeos and kleos instances", action='store_true')
parser.add_argument("--p2p-plugin", help="select a p2p plugin to use (either net or bnet). Defaults to net.", default="net")
args = parser.parse_args()
pnodes=args.p
......@@ -55,16 +58,18 @@ debug=args.v
total_nodes = pnodes
killCount=args.kill_count if args.kill_count > 0 else 1
killSignal=args.kill_sig
killEosInstances= not args.dont_kill
killEosInstances= not args.leave_running
dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
killAll=args.kill_all
killAll=args.clean_run
p2pPlugin=args.p2p_plugin
seed=1
testUtils.Utils.Debug=debug
testSuccessful=False
assert (chainSyncStrategyStr == testUtils.Utils.SyncResyncTag or chainSyncStrategyStr == testUtils.Utils.SyncNoneTag or
chainSyncStrategyStr == testUtils.Utils.SyncHardReplayTag)
random.seed(seed) # Use a fixed seed for repeatability.
cluster=testUtils.Cluster(walletd=True)
walletMgr=testUtils.WalletMgr(True)
......@@ -75,12 +80,14 @@ try:
cluster.killall(allInstances=killAll)
cluster.cleanup()
walletMgr.killall(allInstances=killAll)
walletMgr.cleanup()
Print ("producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d, chain sync strategy: %s" % (
pnodes, topo, delay, chainSyncStrategyStr))
Print("Stand up cluster")
if cluster.launch(pnodes, total_nodes, topo=topo, delay=delay) is False:
if cluster.launch(pnodes, total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False:
errorExit("Failed to stand up eos cluster.")
Print ("Wait for Cluster stabilization")
......@@ -113,7 +120,6 @@ try:
errorExit("Failed to import key for account %s" % (defproduceraAccount.name))
Print("Create accounts.")
#if not cluster.createAccounts(defproduceraAccount):
if not cluster.createAccounts(eosioAccount):
errorExit("Accounts creation failed.")
......@@ -121,23 +127,11 @@ try:
if not cluster.waitOnClusterSync():
errorExit("Cluster sync wait failed.")
# TBD: Known issue (Issue 2043) that 'get currency0000 balance' doesn't return balance.
# Uncomment when functional
Print("Spread funds and validate")
if not cluster.spreadFundsAndValidate(10):
errorExit("Failed to spread and validate funds.")
Print("Wait on cluster sync.")
if not cluster.waitOnClusterSync():
errorExit("Cluster sync wait failed.")
Print("Kill %d cluster node instances." % (killCount))
if cluster.killSomeEosInstances(killCount, killSignal) is False:
errorExit("Failed to kill Eos instances")
Print("nodeos instances killed.")
# TBD: Known issue (Issue 2043) that 'get currency0000 balance' doesn't return balance.
# Uncomment when functional
Print("Spread funds and validate")
if not cluster.spreadFundsAndValidate(10):
errorExit("Failed to spread and validate funds.")
......@@ -156,8 +150,6 @@ try:
errorExit("Cluster never synchronized")
Print ("Cluster synched")
# TBD: Known issue (Issue 2043) that 'get currency0000 balance' doesn't return balance.
# Uncomment when functional
Print("Spread funds and validate")
if not cluster.spreadFundsAndValidate(10):
errorExit("Failed to spread and validate funds.")
......@@ -168,6 +160,10 @@ try:
testSuccessful=True
finally:
if testSuccessful:
Print("Test succeeded.")
else:
Print("Test failed.")
if not testSuccessful and dumpErrorDetails:
cluster.dumpErrorDetails()
walletMgr.dumpErrorDetails()
......
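
For orientation, the restart-scenario flow exercised by the new hard-replay test registered above is roughly the sketch below. relaunchEosInstances is an assumed name here; only the per-node relaunch logic appears in the testUtils.py hunks that follow, while the other calls (killSomeEosInstances, waitOnClusterSync, spreadFundsAndValidate) are taken from this test.

# Sketch of the kill/relaunch/validate loop, assuming a launched testUtils.Cluster named `cluster`.
def killAndResync(cluster, killCount, killSignal):
    if cluster.killSomeEosInstances(killCount, killSignal) is False:
        return False
    if not cluster.relaunchEosInstances():     # assumed helper name; applies the configured chain sync strategy
        return False
    if not cluster.waitOnClusterSync():        # every node must report the root node's head block
        return False
    return cluster.spreadFundsAndValidate(10)  # funds still transfer and balances validate after restart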
......@@ -89,7 +89,7 @@ class Utils:
def checkOutput(cmd):
assert(isinstance(cmd, list))
popen=subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(output,error)=popen.communicate();
(output,error)=popen.communicate()
if popen.returncode != 0:
raise subprocess.CalledProcessError(returncode=popen.returncode, cmd=cmd, output=error)
return output.decode("utf-8")
......@@ -305,9 +305,9 @@ class Node(object):
# pylint: disable=too-many-branches
def getBlock(self, blockNum, retry=True, silentErrors=False):
"""Given a blockId will return block details."""
assert(isinstance(blockNum, str))
assert(isinstance(blockNum, int))
if not self.enableMongo:
cmd="%s %s get block %s" % (Utils.EosClientPath, self.endpointArgs, blockNum)
cmd="%s %s get block %d" % (Utils.EosClientPath, self.endpointArgs, blockNum)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
try:
trans=Utils.runCmdReturnJson(cmd)
......@@ -320,7 +320,7 @@ class Node(object):
else:
for _ in range(2):
cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs)
subcommand='db.Blocks.findOne( { "block_num": %s } )' % (blockNum)
subcommand='db.Blocks.findOne( { "block_num": %d } )' % (blockNum)
if Utils.Debug: Utils.Print("cmd: echo '%s' | %s" % (subcommand, cmd))
try:
trans=Node.runMongoCmdReturnJson(cmd.split(), subcommand)
......@@ -361,20 +361,55 @@ class Node(object):
return None
def doesNodeHaveBlockNum(self, blockNum):
# def doesNodeHaveBlockNum(self, blockNum):
# """Does node have head_block_num >= blockNum"""
# assert isinstance(blockNum, int)
# assert (blockNum > 0)
# info=self.getInfo(silentErrors=True)
# assert(info)
# head_block_num=0
# try:
# head_block_num=int(info["head_block_num"])
# except (TypeError, KeyError) as _:
# Utils.Print("Failure in get info parsing. %s" % (info))
# raise
# return True if blockNum <= head_block_num else False
def isBlockPresent(self, blockNum):
"""Does node have head_block_num >= blockNum"""
assert isinstance(blockNum, int)
assert (blockNum > 0)
info=self.getInfo(silentErrors=True)
assert(info)
node_block_num=0
try:
node_block_num=int(info["head_block_num"])
except (TypeError, KeyError) as _:
Utils.Print("Failure in get info parsing. %s" % (info))
raise
return True if blockNum <= node_block_num else False
def isBlockFinalized(self, blockNum):
"""Is blockNum finalized"""
assert(blockNum)
assert isinstance(blockNum, int)
assert (blockNum > 0)
info=self.getInfo(silentErrors=True)
assert(info)
last_irreversible_block_num=0
node_block_num=0
try:
last_irreversible_block_num=int(info["last_irreversible_block_num"])
node_block_num=int(info["last_irreversible_block_num"])
except (TypeError, KeyError) as _:
Utils.Print("Failure in get info parsing. %s" % (info))
raise
return True if blockNum <= last_irreversible_block_num else True
return True if blockNum <= node_block_num else False
# pylint: disable=too-many-branches
def getTransaction(self, transId, retry=True, silentErrors=False):
......@@ -418,7 +453,7 @@ class Node(object):
assert(transId)
assert(isinstance(transId, str))
assert(blockId)
assert(isinstance(blockId, str))
assert(isinstance(blockId, int))
block=self.getBlock(blockId)
transactions=None
......@@ -441,7 +476,7 @@ class Node(object):
return False
def getBlockIdByTransId(self, transId):
"""Given a transaction Id (string), will return block id (string) containing the transaction"""
"""Given a transaction Id (string), will return block id (int) containing the transaction"""
assert(transId)
assert(isinstance(transId, str))
trans=self.getTransaction(transId)
......@@ -463,17 +498,28 @@ class Node(object):
Utils.Print("Info parsing failed. %s" % (headBlockNum))
for blockNum in range(refBlockNum, headBlockNum+1):
if self.isTransInBlock(str(transId), str(blockNum)):
return str(blockNum)
if self.isTransInBlock(str(transId), blockNum):
return blockNum
return None
def doesNodeHaveTransId(self, transId):
def isTransInAnyBlock(self, transId):
"""Check if transaction (transId) is in a block."""
assert(transId)
assert(isinstance(transId, str))
blockId=self.getBlockIdByTransId(transId)
return True if blockId else False
def isTransFinalized(self, transId):
"""Check if transaction (transId) has been finalized."""
assert(transId)
assert(isinstance(transId, str))
blockId=self.getBlockIdByTransId(transId)
return True if blockId else None
if not blockId:
return False
assert(isinstance(blockId, int))
return self.isBlockFinalized(blockId)
# Disabling MongoDB function
# def getTransByBlockId(self, blockId, retry=True, silentErrors=False):
......@@ -560,11 +606,11 @@ class Node(object):
return None
if stakedDeposit > 0:
self.waitForTransIdOnNode(transId) # seems like account creation needs to be finlized before transfer can happen
self.waitForTransInBlock(transId) # seems like account creation needs to be finalized before transfer can happen
trans = self.transferFunds(creatorAccount, account, Node.currencyIntToStr(stakedDeposit, CORE_SYMBOL), "init")
transId=Node.getTransId(trans)
if waitForTransBlock and not self.waitForTransIdOnNode(transId):
if waitForTransBlock and not self.waitForTransInBlock(transId):
return None
return trans
......@@ -587,11 +633,11 @@ class Node(object):
return None
if stakedDeposit > 0:
self.waitForTransIdOnNode(transId) # seems like account creation needs to be finlized before transfer can happen
self.waitForTransInBlock(transId) # seems like account creation needs to be finalized before transfer can happen
trans = self.transferFunds(creatorAccount, account, "%0.04f %s" % (stakedDeposit/10000, CORE_SYMBOL), "init")
transId=Node.getTransId(trans)
if waitForTransBlock and not self.waitForTransIdOnNode(transId):
if waitForTransBlock and not self.waitForTransInBlock(transId):
return None
return trans
......@@ -702,13 +748,16 @@ class Node(object):
return None
def waitForBlockNumOnNode(self, blockNum, timeout=None):
lam = lambda: self.doesNodeHaveBlockNum(blockNum)
def waitForTransInBlock(self, transId, timeout=None):
"""Wait for trans id to be finalized."""
lam = lambda: self.isTransInAnyBlock(transId)
ret=Utils.waitForBool(lam, timeout)
return ret
def waitForTransIdOnNode(self, transId, timeout=None):
lam = lambda: self.doesNodeHaveTransId(transId)
def waitForTransFinalization(self, transId, timeout=None):
"""Wait for trans id to be finalized."""
assert(isinstance(transId, str))
lam = lambda: self.isTransFinalized(transId)
ret=Utils.waitForBool(lam, timeout)
return ret
......@@ -745,7 +794,7 @@ class Node(object):
assert(trans)
transId=Node.getTransId(trans)
if waitForTransBlock and not self.waitForTransIdOnNode(transId):
if waitForTransBlock and not self.waitForTransInBlock(transId):
return None
return trans
......@@ -940,7 +989,7 @@ class Node(object):
Node.validateTransaction(trans)
transId=Node.getTransId(trans)
if waitForTransBlock and not self.waitForTransIdOnNode(transId):
if waitForTransBlock and not self.waitForTransInBlock(transId):
return None
return trans
......@@ -999,7 +1048,7 @@ class Node(object):
return None
transId=Node.getTransId(trans)
if waitForTransBlock and not self.waitForTransIdOnNode(transId):
if waitForTransBlock and not self.waitForTransInBlock(transId):
return None
return trans
......@@ -1086,12 +1135,30 @@ class Node(object):
return True
# TBD: make nodeId an internal property
def relaunch(self, nodeId, chainArg, timeout=Utils.systemWaitTimeout):
# pylint: disable=too-many-locals
def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTimeout):
assert(self.pid is None)
assert(self.killed)
if Utils.Debug: Utils.Print("Launching node process, Id: %d" % (nodeId))
cmdArr=[]
myCmd=self.cmd
if not newChain:
skip=False
for i in self.cmd.split():
Utils.Print("\"%s\"" % (i))
if skip:
skip=False
continue
if "--genesis-json" == i or "--genesis-timestamp" == i:
skip=True
continue
cmdArr.append(i)
myCmd=" ".join(cmdArr)
dataDir="var/lib/node_%02d" % (nodeId)
dt = datetime.datetime.now()
dateStr="%d_%02d_%02d_%02d_%02d_%02d" % (
......@@ -1099,7 +1166,8 @@ class Node(object):
stdoutFile="%s/stdout.%s.txt" % (dataDir, dateStr)
stderrFile="%s/stderr.%s.txt" % (dataDir, dateStr)
with open(stdoutFile, 'w') as sout, open(stderrFile, 'w') as serr:
cmd=self.cmd + ("" if chainArg is None else (" " + chainArg))
#cmd=self.cmd + ("" if chainArg is None else (" " + chainArg))
cmd=myCmd + ("" if chainArg is None else (" " + chainArg))
Utils.Print("cmd: %s" % (cmd))
popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr)
self.pid=popen.pid
......@@ -1288,15 +1356,10 @@ class WalletMgr(object):
with open(WalletMgr.__walletLogFile, "r") as f:
shutil.copyfileobj(f, sys.stdout)
# @staticmethod
# def killall():
# cmd="pkill -9 %s" % (Utils.EosWalletName)
# if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
# subprocess.call(cmd.split())
def killall(self, allInstances=False):
"""Kill keos instances. allInstances will kill all keos instances running on the system."""
if self.__walletPid:
Utils.Print("Killing wallet manager process %d:" % (self.__walletPid))
os.kill(self.__walletPid, signal.SIGKILL)
if allInstances:
......@@ -1384,7 +1447,7 @@ class Cluster(object):
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", delay=1, onlyBios=False, dontKill=False, dontBootstrap=False):
def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontKill=False, dontBootstrap=False):
"""Launch cluster.
pnodes: producer nodes count
totalNodes: producer + non-producer nodes count
......@@ -1400,13 +1463,13 @@ class Cluster(object):
if len(self.nodes) > 0:
raise RuntimeError("Cluster already running.")
cmd="%s -p %s -n %s -s %s -d %s -i %s -f --p2p-plugin bnet" % (
Utils.EosLauncherPath, pnodes, totalNodes, topo, delay, datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3])
cmd="%s -p %s -n %s -s %s -d %s -i %s -f --p2p-plugin %s" % (
Utils.EosLauncherPath, pnodes, totalNodes, topo, delay, datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3], p2pPlugin)
cmdArr=cmd.split()
if self.staging:
cmdArr.append("--nogen")
nodeosArgs="--max-transaction-time 5000 --filter-on *"
nodeosArgs="--max-transaction-time 5000 --filter-on * --p2p-max-nodes-per-host %d" % (totalNodes)
if not self.walletd:
nodeosArgs += " --plugin eosio::wallet_api_plugin"
if self.enableMongo:
......@@ -1533,13 +1596,14 @@ class Cluster(object):
self.nodes=nodes
return True
# manually set nodes, alternative to explicit launch
def setNodes(self, nodes):
"""manually set nodes, alternative to explicit launch"""
self.nodes=nodes
# If a last transaction exists wait for it on root node, then collect its head block number.
# Wait on this block number on each cluster node
def waitOnClusterSync(self, timeout=None):
"""Get head block on node 0, then ensure the block is present on every cluster node."""
assert(self.nodes)
assert(len(self.nodes) > 0)
targetHeadBlockNum=self.nodes[0].getHeadBlockNum() #get root nodes head block num
if Utils.Debug: Utils.Print("Head block number on root node: %d" % (targetHeadBlockNum))
if targetHeadBlockNum == -1:
......@@ -1547,16 +1611,23 @@ class Cluster(object):
return self.waitOnClusterBlockNumSync(targetHeadBlockNum, timeout)
def waitOnClusterBlockNumSync(self, targetHeadBlockNum, timeout=None):
def waitOnClusterBlockNumSync(self, targetBlockNum, timeout=None):
"""Wait for all nodes to have targetBlockNum finalized."""
assert(self.nodes)
def doNodesHaveBlockNum(nodes, targetHeadBlockNum):
def doNodesHaveBlockNum(nodes, targetBlockNum):
for node in nodes:
if (not node.killed) and (not node.doesNodeHaveBlockNum(targetHeadBlockNum)):
try:
if (not node.killed) and (not node.isBlockPresent(targetBlockNum)):
#if (not node.killed) and (not node.isBlockFinalized(targetBlockNum)):
return False
except (TypeError) as _:
# This can happen if client connects before server is listening
return False
return True
lam = lambda: doNodesHaveBlockNum(self.nodes, targetHeadBlockNum)
lam = lambda: doNodesHaveBlockNum(self.nodes, targetBlockNum)
ret=Utils.waitForBool(lam, timeout)
return ret
......@@ -1692,8 +1763,8 @@ class Cluster(object):
#Utils.Print("nextEosIdx: %d, count: %d" % (nextEosIdx, count))
node=self.nodes[nextEosIdx]
if Utils.Debug: Utils.Print("Wait for trasaction id %s on node port %d" % (transId, node.port))
if node.waitForTransIdOnNode(transId) is False:
if Utils.Debug: Utils.Print("Wait for transaction id %s on node port %d" % (transId, node.port))
if node.waitForTransInBlock(transId) is False:
Utils.Print("ERROR: Selected node never received transaction id %s" % (transId))
return False
......@@ -1713,8 +1784,8 @@ class Cluster(object):
# As an extra step wait for last transaction on the root node
node=self.nodes[0]
if Utils.Debug: Utils.Print("Wait for trasaction id %s on node port %d" % (transId, node.port))
if node.waitForTransIdOnNode(transId) is False:
if Utils.Debug: Utils.Print("Wait for transaction id %s on node port %d" % (transId, node.port))
if node.waitForTransInBlock(transId) is False:
Utils.Print("ERROR: Selected node never received transaction id %s" % (transId))
return False
......@@ -1946,7 +2017,7 @@ class Cluster(object):
accounts.append(initx)
transId=Node.getTransId(trans)
biosNode.waitForTransIdOnNode(transId)
biosNode.waitForTransInBlock(transId)
Utils.Print("Validating system accounts within bootstrap")
biosNode.validateAccounts(accounts)
......@@ -1993,7 +2064,7 @@ class Cluster(object):
trans=trans[1]
transId=Node.getTransId(trans)
if not biosNode.waitForTransIdOnNode(transId):
if not biosNode.waitForTransInBlock(transId):
return False
# wait for block production handover (essentially a block produced by anyone but eosio).
......@@ -2033,7 +2104,7 @@ class Cluster(object):
Node.validateTransaction(trans)
transId=Node.getTransId(trans)
biosNode.waitForTransIdOnNode(transId)
biosNode.waitForTransInBlock(transId)
contract="eosio.token"
contractDir="contracts/%s" % (contract)
......@@ -2058,7 +2129,7 @@ class Cluster(object):
Node.validateTransaction(trans[1])
transId=Node.getTransId(trans[1])
biosNode.waitForTransIdOnNode(transId)
biosNode.waitForTransInBlock(transId)
contract=eosioTokenAccount.name
Utils.Print("push issue action to %s contract" % (contract))
......@@ -2073,7 +2144,8 @@ class Cluster(object):
Node.validateTransaction(trans[1])
Utils.Print("Wait for issue action transaction to become finalized.")
transId=Node.getTransId(trans[1])
biosNode.waitForTransIdOnNode(transId)
# biosNode.waitForTransInBlock(transId)
biosNode.waitForTransFinalization(transId)
expectedAmount="1000000000.0000 {0}".format(CORE_SYMBOL)
Utils.Print("Verify eosio issue, Expected: %s" % (expectedAmount))
......@@ -2112,7 +2184,7 @@ class Cluster(object):
Utils.Print("Wait for last transfer transaction to become finalized.")
transId=Node.getTransId(trans[1])
if not biosNode.waitForTransIdOnNode(transId):
if not biosNode.waitForTransInBlock(transId):
return False
Utils.Print("Cluster bootstrap done.")
......@@ -2152,7 +2224,7 @@ class Cluster(object):
if Utils.Debug: Utils.Print("pgrep output: \"%s\"" % psOut)
for i in range(0, totalNodes):
pattern=r"[\n]?(\d+) (.* --data-dir var/lib/node_%02d)" % (i)
pattern=r"[\n]?(\d+) (.* --data-dir var/lib/node_%02d .*)\n" % (i)
m=re.search(pattern, psOut, re.MULTILINE)
if m is None:
Utils.Print("ERROR: Failed to find %s pid. Pattern %s" % (Utils.EosServerName, pattern))
......@@ -2187,9 +2259,10 @@ class Cluster(object):
chainArg=self.__chainSyncStrategy.arg
newChain= False if self.__chainSyncStrategy.name == Utils.SyncHardReplayTag else True
for i in range(0, len(self.nodes)):
node=self.nodes[i]
if node.killed and not node.relaunch(i, chainArg):
if node.killed and not node.relaunch(i, chainArg, newChain=newChain):
return False
return True
......@@ -2288,7 +2361,7 @@ class Cluster(object):
if waitForTransBlock and transId is not None:
node=self.nodes[0]
if Utils.Debug: Utils.Print("Wait for transaction id %s on server port %d." % ( transId, node.port))
if node.waitForTransIdOnNode(transId) is False:
if node.waitForTransInBlock(transId) is False:
Utils.Print("ERROR: Failed waiting for transaction id %s on server port %d." % (
transId, node.port))
return False
......
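
A short usage sketch contrasting the two block predicates added to testUtils.Node above (node is assumed to be a Node handle from a launched cluster): isBlockPresent compares against head_block_num, while isBlockFinalized compares against last_irreversible_block_num, so a freshly produced block is usually present but not yet finalized.

def checkBlockStatus(node):
    # `node` is assumed to be a running testUtils.Node.
    headNum = node.getHeadBlockNum()
    present = node.isBlockPresent(headNum)      # True: head_block_num >= headNum
    finalized = node.isBlockFinalized(headNum)  # typically False until headNum <= last_irreversible_block_num
    return present, finalized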
......@@ -5,8 +5,6 @@ import testUtils
import argparse
import random
import subprocess
import time
import os
import signal
###############################################################
......@@ -22,12 +20,13 @@ def errorExit(msg="", errorCode=1):
parser = argparse.ArgumentParser()
parser.add_argument("-v", help="verbose logging", action='store_true')
parser.add_argument("--dont-kill", help="Leave cluster running after test finishes", action='store_true')
parser.add_argument("--leave-running", help="Leave cluster running after test finishes", action='store_true')
parser.add_argument("--dump-error-details",
help="Upon error print etc/eosio/node_*/config.ini and var/lib/node_*/stderr.log to stdout",
action='store_true')
parser.add_argument("--keep-logs", help="Don't delete var/lib/node_* folders upon test completion",
action='store_true')
parser.add_argument("--clean-run", help="Kill all nodeos and kleos instances", action='store_true')
args = parser.parse_args()
debug=args.v
......@@ -39,21 +38,42 @@ total_nodes = pnodes
killCount=1
killSignal=testUtils.Utils.SigKillTag
killEosInstances= not args.dont_kill
killEosInstances= not args.leave_running
dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
killAll=args.clean_run
seed=1
testUtils.Utils.Debug=debug
testSuccessful=False
def runNodeosAndGetOutput(myNodeId, myTimeout=3):
"""Startup nodeos, wait for timeout (before forced shutdown) and collect output. Stdout, stderr and return code are returned in a dictionary."""
Print("Launching nodeos process id: %d" % (myNodeId))
cmd="programs/nodeos/nodeos --config-dir etc/eosio/node_bios --data-dir var/lib/node_bios"
Print("cmd: %s" % (cmd))
proc=subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output={}
try:
outs,errs = proc.communicate(timeout=myTimeout)
output["stdout"] = outs.decode("utf-8")
output["stderr"] = errs.decode("utf-8")
output["returncode"] = proc.returncode
except (subprocess.TimeoutExpired) as _:
Print("ERROR: Nodeos is running beyond the defined wait time. Hard killing nodeos instance.")
proc.send_signal(signal.SIGKILL)
return (False, None)
return (True, output)
random.seed(seed) # Use a fixed seed for repeatability.
cluster=testUtils.Cluster(walletd=True)
try:
cluster.setChainStrategy(chainSyncStrategyStr)
cluster.killall()
cluster.killall(allInstances=killAll)
cluster.cleanup()
Print ("producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d, chain sync strategy: %s" % (
......@@ -68,28 +88,7 @@ try:
errorExit("Cluster in bad state, received None node")
Print("Kill cluster nodes.")
cluster.killall()
def runNodeosAndGetOutput(nodeId, timeout=3):
"""Startup nodeos, wait for timeout (before forced shutdown) and collect output. Stdout, stderr and return code are returned in a dictionary."""
Print("Launching nodeos process id: %d" % (nodeId))
dataDir="var/lib/node_%02d" % (nodeId)
cmd="programs/nodeos/nodeos --config-dir etc/eosio/node_bios --data-dir var/lib/node_bios"
Print("cmd: %s" % (cmd))
proc=subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output={}
try:
outs,errs = proc.communicate(timeout=timeout)
output["stdout"] = outs.decode("utf-8")
output["stderr"] = errs.decode("utf-8")
output["returncode"] = proc.returncode
except (subprocess.TimeoutExpired) as _:
Print("ERROR: Nodeos is running beyond the defined wait time. Hard killing nodeos instance.")
proc.send_signal(signal.SIGKILL)
return (False, None)
return (True, output)
cluster.killall(allInstances=killAll)
Print("Restart nodeos repeatedly to ensure dirty database flag sticks.")
nodeId=0
......@@ -98,12 +97,15 @@ try:
for i in range(0,3):
Print("Attempt %d." % (i))
ret = runNodeosAndGetOutput(nodeId, timeout)
assert(ret)
assert(isinstance(ret, tuple))
if not ret or not ret[0]:
exit(1)
#Print(ret)
stderr=ret[1]["stderr"]
assert(ret[1])
assert(isinstance(ret[1], dict))
# pylint: disable=unsubscriptable-object
stderr= ret[1]["stderr"]
retCode=ret[1]["returncode"]
assert(retCode == 2)
assert("database dirty flag set" in stderr)
......@@ -121,7 +123,7 @@ finally:
if killEosInstances:
Print("Shut down the cluster.")
cluster.killall()
cluster.killall(allInstances=killAll)
if testSuccessful and not keepLogs:
Print("Cleanup cluster data.")
cluster.cleanup()
......