Commit 508e95ef authored by Ciju John

Enable nodeos_run_remote_test, distributed-transactions-test & distributed-transactions-remote-test. Add multi-producer support to the enabled tests. Remove dawn2 support.
Parent 6ed4b18d
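The central interface change is that Cluster.launch now takes a prodCount argument (producers per producing node) between totalNodes and topo, and the driver scripts pass it through. A minimal sketch of the updated call, assuming the tests/testUtils module from this repository is importable; the launch signature and Utils.Print come from the diff below, while the literal values shown are illustrative:

    import testUtils

    pnodes = 1          # producing nodes
    total_nodes = 4     # producing + non-producing nodes
    prodCount = 1       # producers per producing node (new argument)
    cluster = testUtils.Cluster()
    # launch() returns False on failure; prodCount now sits between totalNodes and topo
    if cluster.launch(pnodes, total_nodes, prodCount, topo="mesh", delay=1) is False:
        testUtils.Utils.Print("ERROR: Failed to stand up eos cluster.")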
......@@ -48,7 +48,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/consensus-validation-malicious-produc
add_test(NAME chain_test_binaryen COMMAND chain_test --report_level=detailed --color_output -- --binaryen)
add_test(NAME chain_test_wavm COMMAND chain_test --report_level=detailed --color_output -- --wavm)
add_test(NAME nodeos_run_test COMMAND tests/nodeos_run_test.py -v --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME nodeos_run_remote_test COMMAND tests/nodeos_run_remote_test.py --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME nodeos_run_remote_test COMMAND tests/nodeos_run_remote_test.py -v --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME p2p_dawn515_test COMMAND tests/p2p_tests/dawn_515/test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
if(BUILD_MONGO_DB_PLUGIN)
add_test(NAME nodeos_run_test-mongodb COMMAND tests/nodeos_run_test.py --mongodb -v --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
......@@ -59,8 +59,8 @@ endif()
# TODO: add_test(NAME p2p_sync_test_p2_d10 COMMAND tests/p2p_tests/sync/test.sh -p 2 -d 10 WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
# TODO: add_test(NAME message_storm COMMAND tests/p2p_tests/sync/test.sh -m -p 21 -n 21 -d 5 -l WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
# TODO: add_test(NAME trans_sync_across_mixed_cluster_test COMMAND tests/trans_sync_across_mixed_cluster_test.sh -p 1 -n 2 WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -p 1 -n 4 --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
# TODO: add_test(NAME distributed-transactions-remote-test COMMAND tests/distributed-transactions-remote-test.py --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -p 1 -n 4 -v --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME distributed-transactions-remote-test COMMAND tests/distributed-transactions-remote-test.py -v --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
# TODO: add_test(NAME restart-scenarios-test_resync COMMAND tests/restart-scenarios-test.py -c resync -p3 --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
# TODO: add_test(NAME restart-scenarios-test_replay COMMAND tests/restart-scenarios-test.py -c replay -p3 --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
# TODO: add_test(NAME consensus-validation-malicious-producers COMMAND tests/consensus-validation-malicious-producers.py -w 80 --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
......
......@@ -4,6 +4,8 @@ import testUtils
import argparse
import subprocess
import tempfile
import os
Print=testUtils.Utils.Print
......@@ -12,36 +14,53 @@ def errorExit(msg="", errorCode=1):
exit(errorCode)
pnodes=3
nodesFile="tests/sample-cluster-map.json"
# nodesFile="tests/sample-cluster-map.json"
parser = argparse.ArgumentParser()
parser.add_argument("-p", type=int, help="producing nodes count", default=pnodes)
parser.add_argument("-v", help="verbose", action='store_true')
parser.add_argument("--nodes-file", type=str, help="File containing nodes info in JSON format.", default=nodesFile)
# parser.add_argument("--nodes-file", type=str, help="File containing nodes info in JSON format.", default=nodesFile)
parser.add_argument("--not-noon", help="This is not the Noon branch.", action='store_true')
parser.add_argument("--dont-kill", help="Leave cluster running after test finishes", action='store_true')
parser.add_argument("--dump-error-details",
help="Upon error print etc/eosio/node_*/config.ini and var/lib/node_*/stderr.log to stdout",
action='store_true')
args = parser.parse_args()
pnodes=args.p
nodesFile=args.nodes_file
# nodesFile=args.nodes_file
debug=args.v
amINoon=not args.not_noon
dontKill=args.dont_kill
dumpErrorDetails=args.dump_error_details
testUtils.Utils.Debug=debug
killEosInstances=True
killEosInstances=not dontKill
topo="mesh"
delay=1
prodCount=1 # producers per producer node
total_nodes=pnodes
actualTest="tests/distributed-transactions-test.py"
testSuccessful=False
clusterMapJsonTemplate="""{
"keys": {
"initaPrivateKey": "%s",
"initbPrivateKey": "%s"
},
"nodes": [
{"port": 8888, "host": "localhost"},
{"port": 8889, "host": "localhost"},
{"port": 8890, "host": "localhost"}
]
}
"""
if not amINoon:
testUtils.Utils.iAmNotNoon()
cluster=testUtils.Cluster()
(fd, nodesFile) = tempfile.mkstemp()
try:
Print("BEGIN")
cluster.killall()
......@@ -50,7 +69,7 @@ try:
Print ("producing nodes: %s, non-producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d" %
(pnodes, total_nodes-pnodes, topo, delay))
Print("Stand up cluster")
if cluster.launch(pnodes, total_nodes, topo, delay) is False:
if cluster.launch(pnodes, total_nodes, prodCount, topo, delay) is False:
errorExit("Failed to stand up eos cluster.")
Print ("Wait for Cluster stabilization")
......@@ -58,7 +77,17 @@ try:
if not cluster.waitOnClusterBlockNumSync(3):
errorExit("Cluster never stabilized")
cmd="%s --nodes-file %s %s %s" % (actualTest, nodesFile, "-v" if debug else "", "" if amINoon else "--not-noon")
producerKeys=testUtils.Cluster.parseClusterKeys(total_nodes)
initaPrvtKey=producerKeys["inita"]["private"]
initbPrvtKey=producerKeys["initb"]["private"]
clusterMapJson = clusterMapJsonTemplate % (initaPrvtKey, initbPrvtKey)
tfile = os.fdopen(fd, "w")
tfile.write(clusterMapJson)
tfile.close()
cmd="%s --nodes-file %s %s %s %s" % (actualTest, nodesFile, "-v" if debug else "", "" if amINoon else "--not-noon", "--dont-kill" if dontKill else "")
Print("Starting up distributed transactions test: %s" % (actualTest))
Print("cmd: %s\n" % (cmd))
if 0 != subprocess.call(cmd, shell=True):
......@@ -67,6 +96,7 @@ try:
testSuccessful=True
Print("\nEND")
finally:
os.remove(nodesFile)
if not testSuccessful and dumpErrorDetails:
cluster.dumpErrorDetails()
Print("== Errors see above ==")
......
......@@ -14,6 +14,7 @@ def errorExit(msg="", errorCode=1):
parser = argparse.ArgumentParser()
parser.add_argument("-v", help="verbose", action='store_true')
parser.add_argument("--not-noon", help="This is not the Noon branch.", action='store_true')
parser.add_argument("--dont-kill", help="Leave cluster running after test finishes", action='store_true')
parser.add_argument("--dump-error-details",
help="Upon error print etc/eosio/node_*/config.ini and var/lib/node_*/stderr.log to stdout",
action='store_true')
......@@ -21,13 +22,15 @@ parser.add_argument("--dump-error-details",
args = parser.parse_args()
debug=args.v
amINoon=not args.not_noon
dontKill=args.dont_kill
dumpErrorDetails=args.dump_error_details
testUtils.Utils.Debug=debug
killEosInstances=True
killEosInstances=not dontKill
topo="mesh"
delay=1
prodCount=1 # producers per producer node
pnodes=1
total_nodes=pnodes
actualTest="tests/nodeos_run_test.py"
......@@ -47,7 +50,7 @@ try:
Print ("producing nodes: %s, non-producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d" %
(pnodes, total_nodes-pnodes, topo, delay))
Print("Stand up cluster")
if cluster.launch(pnodes, total_nodes, topo, delay) is False:
if cluster.launch(pnodes, total_nodes, prodCount, topo, delay) is False:
errorExit("Failed to stand up eos cluster.")
Print ("Wait for Cluster stabilization")
......@@ -59,7 +62,7 @@ try:
initaPrvtKey=producerKeys["inita"]["private"]
initbPrvtKey=producerKeys["initb"]["private"]
cmd="%s --dont-launch --inita_prvt_key %s --initb_prvt_key %s %s %s" % (actualTest, initaPrvtKey, initbPrvtKey, "-v" if debug else "", "" if amINoon else "--not-noon")
cmd="%s --dont-launch --inita_prvt_key %s --initb_prvt_key %s %s %s %s" % (actualTest, initaPrvtKey, initbPrvtKey, "-v" if debug else "", "" if amINoon else "--not-noon", "--dont-kill" if dontKill else "")
Print("Starting up %s test: %s" % ("nodeos" if amINoon else "eosd", actualTest))
Print("cmd: %s\n" % (cmd))
if 0 != subprocess.call(cmd, shell=True):
......
......@@ -86,6 +86,9 @@ try:
print("SERVER: %s" % (server))
print("PORT: %d" % (port))
if enableMongo and not cluster.isMongodDbRunning():
errorExit("MongoDb doesn't seem to be running.")
if localTest and not dontLaunch:
cluster.killall()
cluster.cleanup()
......@@ -415,7 +418,7 @@ try:
Print("verify abi is set")
account=node.getEosAccountFromDb(currencyAccount.name)
abiName=account["abi"]["structs"][0]["name"]
abiActionName=account["abi"]["actions"][0]["action_name"]
abiActionName=account["abi"]["actions"][0]["name"]
abiType=account["abi"]["actions"][0]["type"]
if abiName != "transfer" or abiActionName != "transfer" or abiType != "transfer":
errorExit("FAILURE - get table currency account failed", raw=True)
......
......@@ -192,6 +192,7 @@ class Node(object):
def stdinAndCheckOutput(cmd, subcommand):
outs=None
errs=None
ret=0
try:
popen=subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outs,errs=popen.communicate(input=subcommand.encode("utf-8"))
......@@ -200,7 +201,7 @@ class Node(object):
msg=ex.output
return (ex.returncode, msg, None)
return (0, outs, errs)
return (ret, outs, errs)
@staticmethod
def normalizeJsonObject(extJStr):
......@@ -458,6 +459,7 @@ class Node(object):
# return None
if stakedDeposit > 0:
self.waitForTransIdOnNode(transId) # seems like account creation needs to be finalized before transfer can happen
trans = self.transferFunds(creatorAccount, account, stakedDeposit, "init")
transId=Node.getTransId(trans)
# if waitForTransBlock and not self.waitForTransIdOnNode(transId):
......@@ -553,15 +555,14 @@ class Node(object):
def waitForBlockNumOnNode(self, blockNum, timeout=None):
if timeout is None:
timeout=Utils.systemWaitTimeout
startTime=time.time()
remainingTime=timeout
Utils.Debug and Utils.Print("cmd: remaining time %d seconds" % (remainingTime))
while time.time()-startTime < timeout:
endTime=time.time()+timeout
while endTime > time.time():
if self.doesNodeHaveBlockNum(blockNum):
return True
sleepTime=3 if remainingTime > 3 else (3 - remainingTime)
remainingTime -= sleepTime
Utils.Debug and Utils.Print("cmd: sleep %d seconds" % (sleepTime))
sleepTime=3
Utils.Debug and Utils.Print("cmd: sleep %d seconds, remaining time: %d seconds" %
(sleepTime, endTime - time.time()))
time.sleep(sleepTime)
return False
......@@ -569,15 +570,14 @@ class Node(object):
def waitForTransIdOnNode(self, transId, timeout=None):
if timeout is None:
timeout=Utils.systemWaitTimeout
startTime=time.time()
remainingTime=timeout
Utils.Debug and Utils.Print("cmd: remaining time %d seconds" % (remainingTime))
while time.time()-startTime < timeout:
endTime=time.time()+timeout
while endTime > time.time():
if self.doesNodeHaveTransId(transId):
return True
sleepTime=3 if remainingTime > 3 else (3 - remainingTime)
remainingTime -= sleepTime
Utils.Debug and Utils.Print("cmd: sleep %d seconds" % (sleepTime))
sleepTime=3
Utils.Debug and Utils.Print("cmd: sleep %d seconds, remaining time: %d seconds" %
(sleepTime, endTime - time.time()))
time.sleep(sleepTime)
return False
......@@ -585,21 +585,20 @@ class Node(object):
def waitForNextBlock(self, timeout=None):
if timeout is None:
timeout=Utils.systemWaitTimeout
startTime=time.time()
remainingTime=timeout
Utils.Debug and Utils.Print("cmd: remaining time %d seconds" % (remainingTime))
endTime=time.time()+timeout
num=self.getIrreversibleBlockNum()
Utils.Debug and Utils.Print("Current block number: %s" % (num))
while time.time()-startTime < timeout:
while endTime > time.time():
nextNum=self.getIrreversibleBlockNum()
if nextNum > num:
Utils.Debug and Utils.Print("Next block number: %s" % (nextNum))
return True
sleepTime=.5 if remainingTime > .5 else (.5 - remainingTime)
remainingTime -= sleepTime
Utils.Debug and Utils.Print("cmd: sleep %d seconds" % (sleepTime))
sleepTime=.5
Utils.Debug and Utils.Print("cmd: sleep %d seconds, remaining time: %d seconds" %
(sleepTime, endTime - time.time()))
time.sleep(sleepTime)
return False
......@@ -1146,13 +1145,14 @@ class Cluster(object):
self.walletMgr=walletMgr
# launch local nodes and set self.nodes
def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", delay=0):
def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", delay=1):
"""Launch cluster.
pnodes: producer nodes count
totalNodes: producer + non-producer nodes count
prodCount: producers per producer node
topo: cluster topology (as defined by launcher)
delay: delay between individual nodes laucnh (as defined by launcher)
delay: delay between individual nodes launch (as defined by launcher)
A delay of 0 exposes a bootstrap bug where producer handover may leave a large gap, confusing nodes and bringing the system to a halt.
"""
if not self.localCluster:
Utils.Print("WARNING: Cluster not local, not launching %s." % (Utils.EosServerName))
......@@ -1195,7 +1195,8 @@ class Cluster(object):
self.nodes=nodes
# ensure cluster inter-connect by ensuring everyone has block 1
# ensure cluster nodes are inter-connected by ensuring everyone has block 1
Utils.Debug and Utils.Print("Cluster viability smoke test. Validate every cluster node has block 1. ")
if not self.waitOnClusterBlockNumSync(1):
Utils.Print("ERROR: Cluster doesn't seem to be in sync. Some nodes missing block 1")
return False
......@@ -1210,18 +1211,18 @@ class Cluster(object):
Utils.Print("ERROR: Unable to parse cluster info")
return False
initaKeys=producerKeys["inita"]
initbKeys=producerKeys["initb"]
if initaKeys is None or initbKeys is None:
init1Keys=producerKeys["inita"]
init2Keys=producerKeys["initb"]
if init1Keys is None or init2Keys is None:
Utils.Print("ERROR: Failed to parse inita or intb private keys from cluster config files.")
self.initaAccount.ownerPrivateKey=initaKeys["private"]
self.initaAccount.ownerPublicKey=initaKeys["public"]
self.initaAccount.activePrivateKey=initaKeys["private"]
self.initaAccount.activePublicKey=initaKeys["public"]
self.initbAccount.ownerPrivateKey=initbKeys["private"]
self.initbAccount.ownerPublicKey=initbKeys["public"]
self.initbAccount.activePrivateKey=initbKeys["private"]
self.initbAccount.activePublicKey=initbKeys["public"]
self.initaAccount.ownerPrivateKey=init1Keys["private"]
self.initaAccount.ownerPublicKey=init1Keys["public"]
self.initaAccount.activePrivateKey=init1Keys["private"]
self.initaAccount.activePublicKey=init1Keys["public"]
self.initbAccount.ownerPrivateKey=init2Keys["private"]
self.initbAccount.ownerPublicKey=init2Keys["public"]
self.initbAccount.activePrivateKey=init2Keys["private"]
self.initbAccount.activePublicKey=init2Keys["public"]
producerKeys.pop("eosio")
return True
......@@ -1310,10 +1311,9 @@ class Cluster(object):
def waitOnClusterBlockNumSync(self, targetHeadBlockNum, timeout=None):
if timeout is None:
timeout=Utils.systemWaitTimeout
startTime=time.time()
remainingTime=timeout
Utils.Debug and Utils.Print("cmd: remaining time %d seconds" % (remainingTime))
while time.time()-startTime < timeout:
endTime=time.time()+timeout
while endTime > time.time():
synced=True
for node in self.nodes:
if node.alive:
......@@ -1323,10 +1323,9 @@ class Cluster(object):
if synced is True:
return True
#Utils.Debug and Utils.Print("Brief pause to allow nodes to catch up.")
sleepTime=3 if remainingTime > 3 else (3 - remainingTime)
remainingTime -= sleepTime
Utils.Debug and Utils.Print("cmd: remaining time %d seconds" % (remainingTime))
sleepTime=3
Utils.Debug and Utils.Print("cmd: sleep %d seconds, remaining time: %d seconds" %
(sleepTime, endTime - time.time()))
time.sleep(sleepTime)
return False
......@@ -1597,7 +1596,9 @@ class Cluster(object):
@staticmethod
def bootstrap(totalNodes, prodCount, biosHost, biosPort):
"""Create 'prodCount' init accounts and deposits 10000000000 EOS in each. If prodCount is -1 will initialize all possible producers."""
"""Create 'prodCount' init accounts and deposits 10000000000 EOS in each. If prodCount is -1 will initialize all possible producers.
Ensure nodes are inter-connected prior to this call. One way to validate this will be to check if every node has block 1."""
biosNode=Node(biosHost, biosPort)
if not biosNode.checkPulse():
Utils.Print("ERROR: Bios node doesn't appear to be running...")
......@@ -1766,8 +1767,7 @@ class Cluster(object):
if platform.linux_distribution()[0] == "Ubuntu" or platform.linux_distribution()[0] == "LinuxMint" or platform.linux_distribution()[0] == "Fedora":
pgrepOpts="-a"
startTime=time.time()
remainingTime=timeout
endTime=time.time()+timeout
checkForNodes=True
psOut=None
......@@ -1781,11 +1781,11 @@ class Cluster(object):
except subprocess.CalledProcessError as ex:
pass
checkForNodes= (remainingTime > 0)
checkForNodes= (endTime > time.time())
if checkForNodes:
sleepTime=3 if remainingTime > 3 else (3 - remainingTime)
remainingTime -= sleepTime
Utils.Debug and Utils.Print("cmd: sleep %d seconds" % (sleepTime))
sleepTime=3
Utils.Debug and Utils.Print("cmd: sleep %d seconds, remaining time: %d seconds" %
(sleepTime, endTime - time.time()))
time.sleep(sleepTime)
if psOut is not None:
......@@ -1797,10 +1797,10 @@ class Cluster(object):
break
instance=Node(self.host, self.port + i, pid=int(m.group(1)), cmd=m.group(2), alive=True, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb)
instance.setWalletEndpointArgs(self.walletEndpointArgs)
Utils.Debug and Utils.Print("Node:", instance)
Utils.Debug and Utils.Print("Node>", instance)
nodes.append(instance)
else:
Utils.Print("ERROR: No nodes discovered.")
else:
Utils.Print("ERROR: No nodes discovered.")
return nodes
......@@ -1890,6 +1890,17 @@ class Cluster(object):
except:
pass
def isMongodDbRunning(self):
cmd="%s %s" % (Utils.MongoPath, self.mongoEndpointArgs)
subcommand="db.version()"
Utils.Debug and Utils.Print("echo %s | %s" % (subcommand, cmd))
ret,outs,errs=Node.stdinAndCheckOutput(cmd.split(), subcommand)
if ret is not 0:
Utils.Print("ERROR: Failed to check database version: %s" % (Node.byteArrToStr(errs)) )
return False
Utils.Debug and Utils.Print("MongoDb response: %s" % (outs))
return True
def waitForNextBlock(self, timeout=None):
if timeout is None:
timeout=Utils.systemWaitTimeout
......
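Once built, the newly enabled tests can be exercised from the CMake build directory with ctest, e.g. "ctest -R nodeos_run_remote_test --output-on-failure" or "ctest -R distributed-transactions --output-on-failure" (a sketch assuming a standard CMake build tree; the test names come from the add_test registrations above).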