Commit 83e81af3 authored by: Matt Witherspoon

Merge remote-tracking branch 'origin/develop' into macos_proper_https

......@@ -27,7 +27,7 @@ set( CXX_STANDARD_REQUIRED ON)
set(VERSION_MAJOR 1)
set(VERSION_MINOR 3)
set(VERSION_PATCH 0)
set(VERSION_PATCH 1)
set( CLI_CLIENT_EXECUTABLE_NAME cleos )
set( NODE_EXECUTABLE_NAME nodeos )
......
......@@ -41,10 +41,12 @@ find_package(Boost 1.67 REQUIRED COMPONENTS
find_library(libtester eosio_testing @CMAKE_INSTALL_FULL_LIBDIR@)
find_library(libchain eosio_chain @CMAKE_INSTALL_FULL_LIBDIR@)
if ( "${CMAKE_BUILD_TYPE}" EQUAL "Debug" )
if ( "${CMAKE_BUILD_TYPE}" STREQUAL "Debug" )
find_library(libfc fc_debug @CMAKE_INSTALL_FULL_LIBDIR@)
find_library(libsecp256k1 secp256k1_debug @CMAKE_INSTALL_FULL_LIBDIR@)
else()
find_library(libfc fc @CMAKE_INSTALL_FULL_LIBDIR@)
find_library(libsecp256k1 secp256k1 @CMAKE_INSTALL_FULL_LIBDIR@)
endif()
find_library(libwasm WASM @CMAKE_INSTALL_FULL_LIBDIR@)
......@@ -59,7 +61,6 @@ find_library(liboscrypto crypto @OPENSSL_ROOT_DIR@/lib)
find_library(libosssl ssl @OPENSSL_ROOT_DIR@/lib)
find_library(libchainbase chainbase @CMAKE_INSTALL_FULL_LIBDIR@)
find_library(libbuiltins builtins @CMAKE_INSTALL_FULL_LIBDIR@)
find_library(libsecp256k1 secp256k1 @CMAKE_INSTALL_FULL_LIBDIR@)
find_library(GMP_LIBRARIES NAMES libgmp.a gmp.lib gmp libgmp-10 mpir
HINTS ENV GMP_LIB_DIR
ENV GMP_DIR
......
......@@ -41,10 +41,13 @@ find_package(Boost 1.67 REQUIRED COMPONENTS
find_library(libtester eosio_testing @CMAKE_BINARY_DIR@/libraries/testing)
find_library(libchain eosio_chain @CMAKE_BINARY_DIR@/libraries/chain)
if ( "${CMAKE_BUILD_TYPE}" EQUAL "Debug" )
if ( "${CMAKE_BUILD_TYPE}" STREQUAL "Debug" )
find_library(libfc fc_debug @CMAKE_BINARY_DIR@/libraries/fc)
find_library(libsecp256k1 secp256k1_debug @CMAKE_BINARY_DIR@/libraries/fc/secp256k1)
else()
find_library(libfc fc @CMAKE_BINARY_DIR@/libraries/fc)
find_library(libsecp256k1 secp256k1 @CMAKE_BINARY_DIR@/libraries/fc/secp256k1)
endif()
find_library(libwasm WASM @CMAKE_BINARY_DIR@/libraries/wasm-jit/Source/WASM)
......@@ -59,7 +62,6 @@ find_library(liboscrypto crypto @OPENSSL_ROOT_DIR@/lib)
find_library(libosssl ssl @OPENSSL_ROOT_DIR@/lib)
find_library(libchainbase chainbase @CMAKE_BINARY_DIR@/libraries/chainbase)
find_library(libbuiltins builtins @CMAKE_BINARY_DIR@/libraries/builtins)
find_library(libsecp256k1 secp256k1 @CMAKE_BINARY_DIR@/libraries/fc/secp256k1)
find_library(GMP_LIBRARIES NAMES libgmp.a gmp.lib gmp libgmp-10 mpir
HINTS ENV GMP_LIB_DIR
ENV GMP_DIR
......
......@@ -20,10 +20,10 @@ cd eos/Docker
docker build . -t eosio/eos
```
The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.3.0 tag, you could do the following:
The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.3.1 tag, you could do the following:
```bash
docker build -t eosio/eos:v1.3.0 --build-arg branch=v1.3.0 .
docker build -t eosio/eos:v1.3.1 --build-arg branch=v1.3.1 .
```
By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image.
......
......@@ -49,8 +49,15 @@
DOXYGEN=false
ENABLE_COVERAGE_TESTING=false
CORE_SYMBOL_NAME="SYS"
# Use current directory's tmp directory if noexec is enabled for /tmp
if (mount | grep "/tmp " | grep --quiet noexec); then
mkdir -p $SOURCE_DIR/tmp
TEMP_DIR="${SOURCE_DIR}/tmp"
rm -rf $SOURCE_DIR/tmp/*
else # noexec wasn't found
TEMP_DIR="/tmp"
fi
START_MAKE=true
TEMP_DIR="/tmp"
TIME_BEGIN=$( date -u +%s )
VERSION=1.2
......@@ -153,7 +160,7 @@
OS_NAME=$( cat /etc/os-release | grep ^NAME | cut -d'=' -f2 | sed 's/\"//gI' )
case "$OS_NAME" in
"Amazon Linux AMI")
"Amazon Linux AMI"|"Amazon Linux")
FILE="${SOURCE_DIR}/scripts/eosio_build_amazon.sh"
CXX_COMPILER=g++
C_COMPILER=gcc
......@@ -269,7 +276,8 @@
exit 0
fi
if ! make -j"${CPU_CORE}"
if [ -z ${JOBS} ]; then JOBS=$CPU_CORE; fi # Future proofing: Ensure $JOBS is set (usually set in scripts/eosio_build_*.sh scripts)
if ! make -j"${JOBS}"
then
printf "\\n\\t>>>>>>>>>>>>>>>>>>>> MAKE building EOSIO has exited with the above error.\\n\\n"
exit -1
......
......@@ -91,12 +91,6 @@ namespace eosio { namespace chain {
operator unsigned __int128()const { return value; }
};
inline std::vector<name> sort_names( std::vector<name>&& names ) {
fc::deduplicate(names);
return names;
}
} } // eosio::chain
namespace std {
......
Subproject commit 4724baf2095cdc1bb1722254874b51070adf0e74
Subproject commit 8ca96ad6b18709d65a7d1f67f8893978f25babcf
Subproject commit 6a83237f9e3e71160bbd64d3c87c3418d057624c
Subproject commit 29cd7df702e79954076461af0eadad2e9d745d44
......@@ -6,8 +6,12 @@ Third parties are encouraged to make pull requests to this file (`develop` branc
| Description | URL |
| ----------- | --- |
| Watch for specific actions and send them to an HTTP URL | https://github.com/eosauthority/eosio-watcher-plugin |
| ElasticSearch | https://github.com/EOSLaoMao/elasticsearch_plugin |
| Kafka | https://github.com/TP-Lab/kafka_plugin |
| MySQL | https://github.com/eosBLACK/eosio_mysqldb_plugin |
| SQL | https://github.com/asiniscalchi/eosio_sql_plugin |
| Watch for specific actions and send them to an HTTP URL | https://github.com/eosauthority/eosio-watcher-plugin |
| ZeroMQ | https://github.com/cc32d9/eos_zmq_plugin |
## DISCLAIMER:
......
......@@ -111,7 +111,8 @@ public:
void remove_account_control( const account_name& name, const permission_name& permission );
/// @return true if act should be added to mongodb, false to skip it
bool filter_include( const chain::action_trace& action_trace ) const;
bool filter_include( const account_name& receiver, const action_name& act_name,
const vector<chain::permission_level>& authorization ) const;
void init();
void wipe_database();
......@@ -127,6 +128,7 @@ public:
bool filter_on_star = true;
std::set<filter_entry> filter_on;
std::set<filter_entry> filter_out;
bool update_blocks_via_block_num = false;
bool store_blocks = true;
bool store_block_states = true;
bool store_transactions = true;
......@@ -217,20 +219,22 @@ const std::string mongo_db_plugin_impl::accounts_col = "accounts";
const std::string mongo_db_plugin_impl::pub_keys_col = "pub_keys";
const std::string mongo_db_plugin_impl::account_controls_col = "account_controls";
bool mongo_db_plugin_impl::filter_include( const chain::action_trace& action_trace ) const {
bool mongo_db_plugin_impl::filter_include( const account_name& receiver, const action_name& act_name,
const vector<chain::permission_level>& authorization ) const
{
bool include = false;
if( filter_on_star ) {
include = true;
} else {
auto itr = std::find_if( filter_on.cbegin(), filter_on.cend(), [&action_trace]( const auto& filter ) {
return filter.match( action_trace.receipt.receiver, action_trace.act.name, 0 );
auto itr = std::find_if( filter_on.cbegin(), filter_on.cend(), [&receiver, &act_name]( const auto& filter ) {
return filter.match( receiver, act_name, 0 );
} );
if( itr != filter_on.cend() ) {
include = true;
} else {
for( const auto& a : action_trace.act.authorization ) {
auto itr = std::find_if( filter_on.cbegin(), filter_on.cend(), [&action_trace, &a]( const auto& filter ) {
return filter.match( action_trace.receipt.receiver, action_trace.act.name, a.actor );
for( const auto& a : authorization ) {
auto itr = std::find_if( filter_on.cbegin(), filter_on.cend(), [&receiver, &act_name, &a]( const auto& filter ) {
return filter.match( receiver, act_name, a.actor );
} );
if( itr != filter_on.cend() ) {
include = true;
......@@ -241,15 +245,16 @@ bool mongo_db_plugin_impl::filter_include( const chain::action_trace& action_tra
}
if( !include ) { return false; }
if( filter_out.empty() ) { return true; }
auto itr = std::find_if( filter_out.cbegin(), filter_out.cend(), [&action_trace]( const auto& filter ) {
return filter.match( action_trace.receipt.receiver, action_trace.act.name, 0 );
auto itr = std::find_if( filter_out.cbegin(), filter_out.cend(), [&receiver, &act_name]( const auto& filter ) {
return filter.match( receiver, act_name, 0 );
} );
if( itr != filter_out.cend() ) { return false; }
for( const auto& a : action_trace.act.authorization ) {
auto itr = std::find_if( filter_out.cbegin(), filter_out.cend(), [&action_trace, &a]( const auto& filter ) {
return filter.match( action_trace.receipt.receiver, action_trace.act.name, a.actor );
for( const auto& a : authorization ) {
auto itr = std::find_if( filter_out.cbegin(), filter_out.cend(), [&receiver, &act_name, &a]( const auto& filter ) {
return filter.match( receiver, act_name, a.actor );
} );
if( itr != filter_out.cend() ) { return false; }
}
......@@ -694,6 +699,27 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti
using bsoncxx::builder::basic::make_array;
namespace bbb = bsoncxx::builder::basic;
const auto& trx = t->trx;
if( !filter_on_star || !filter_out.empty() ) {
bool include = false;
for( const auto& a : trx.actions ) {
if( filter_include( a.account, a.name, a.authorization ) ) {
include = true;
break;
}
}
if( !include ) {
for( const auto& a : trx.context_free_actions ) {
if( filter_include( a.account, a.name, a.authorization ) ) {
include = true;
break;
}
}
}
if( !include ) return;
}
auto trans_doc = bsoncxx::builder::basic::document{};
auto now = std::chrono::duration_cast<std::chrono::milliseconds>(
......@@ -701,7 +727,6 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti
const auto& trx_id = t->id;
const auto trx_id_str = trx_id.str();
const auto& trx = t->trx;
trans_doc.append( kvp( "trx_id", trx_id_str ) );
......@@ -776,7 +801,8 @@ mongo_db_plugin_impl::add_action_trace( mongocxx::bulk_write& bulk_action_traces
}
bool added = false;
if( start_block_reached && store_action_traces && filter_include( atrace ) ) {
if( start_block_reached && store_action_traces &&
filter_include( atrace.receipt.receiver, atrace.act.name, atrace.act.authorization ) ) {
auto action_traces_doc = bsoncxx::builder::basic::document{};
const chain::base_action_trace& base = atrace; // without inline action traces
......@@ -930,9 +956,16 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr
block_state_doc.append( kvp( "createdAt", b_date{now} ) );
try {
if( !_block_states.update_one( make_document( kvp( "block_id", block_id_str ) ),
make_document( kvp( "$set", block_state_doc.view() ) ), update_opts ) ) {
EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block_state ${bid}", ("bid", block_id) );
if( update_blocks_via_block_num ) {
if( !_block_states.update_one( make_document( kvp( "block_num", b_int32{static_cast<int32_t>(block_num)} ) ),
make_document( kvp( "$set", block_state_doc.view() ) ), update_opts ) ) {
EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block_state ${num}", ("num", block_num) );
}
} else {
if( !_block_states.update_one( make_document( kvp( "block_id", block_id_str ) ),
make_document( kvp( "$set", block_state_doc.view() ) ), update_opts ) ) {
EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block_state ${bid}", ("bid", block_id) );
}
}
} catch( ... ) {
handle_mongo_exception( "block_states insert: " + json, __LINE__ );
......@@ -963,9 +996,16 @@ void mongo_db_plugin_impl::_process_accepted_block( const chain::block_state_ptr
block_doc.append( kvp( "createdAt", b_date{now} ) );
try {
if( !_blocks.update_one( make_document( kvp( "block_id", block_id_str ) ),
make_document( kvp( "$set", block_doc.view() ) ), update_opts ) ) {
EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block ${bid}", ("bid", block_id) );
if( update_blocks_via_block_num ) {
if( !_blocks.update_one( make_document( kvp( "block_num", b_int32{static_cast<int32_t>(block_num)} ) ),
make_document( kvp( "$set", block_doc.view() ) ), update_opts ) ) {
EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block ${num}", ("num", block_num) );
}
} else {
if( !_blocks.update_one( make_document( kvp( "block_id", block_id_str ) ),
make_document( kvp( "$set", block_doc.view() ) ), update_opts ) ) {
EOS_ASSERT( false, chain::mongo_db_insert_fail, "Failed to insert block ${bid}", ("bid", block_id) );
}
}
} catch( ... ) {
handle_mongo_exception( "blocks insert: " + json, __LINE__ );
......@@ -1427,6 +1467,8 @@ void mongo_db_plugin::set_program_options(options_description& cli, options_desc
"MongoDB URI connection string, see: https://docs.mongodb.com/master/reference/connection-string/."
" If not specified then plugin is disabled. Default database 'EOS' is used if not specified in URI."
" Example: mongodb://127.0.0.1:27017/EOS")
("mongodb-update-via-block-num", bpo::value<bool>()->default_value(false),
"Update blocks/block_state with latest via block number so that duplicates are overwritten.")
("mongodb-store-blocks", bpo::value<bool>()->default_value(true),
"Enables storing blocks in mongodb.")
("mongodb-store-block-states", bpo::value<bool>()->default_value(true),
......@@ -1476,6 +1518,9 @@ void mongo_db_plugin::plugin_initialize(const variables_map& options)
if( options.count( "mongodb-block-start" )) {
my->start_block_num = options.at( "mongodb-block-start" ).as<uint32_t>();
}
if( options.count( "mongodb-update-via-block-num" )) {
my->update_blocks_via_block_num = options.at( "mongodb-update-via-block-num" ).as<bool>();
}
if( options.count( "mongodb-store-blocks" )) {
my->store_blocks = options.at( "mongodb-store-blocks" ).as<bool>();
}
......
......@@ -852,10 +852,22 @@ namespace eosio {
}
block_id_type head_id;
block_id_type lib_id;
uint32_t lib_num;
block_id_type remote_head_id;
uint32_t remote_head_num = 0;
try {
lib_num = cc.last_irreversible_block_num();
lib_id = cc.last_irreversible_block_id();
if (last_handshake_recv.generation >= 1) {
remote_head_id = last_handshake_recv.head_id;
remote_head_num = block_header::num_from_id(remote_head_id);
fc_dlog(logger, "maybe truncating branch at = ${h}:${id}",("h",remote_head_num)("id",remote_head_id));
}
// base our branch off of the last handshake we sent the peer instead of our current
// LIB which could have moved forward in time as packets were in flight.
if (last_handshake_sent.generation >= 1) {
lib_id = last_handshake_sent.last_irreversible_block_id;
} else {
lib_id = cc.last_irreversible_block_id();
}
head_id = cc.fork_db_head_block_id();
}
catch (const assert_exception &ex) {
......@@ -872,6 +884,13 @@ namespace eosio {
block_id_type null_id;
for (auto bid = head_id; bid != null_id && bid != lib_id; ) {
try {
// if the last handshake received indicates that we are catching up on a fork
// that the peer is already partially aware of, no need to resend blocks
if (remote_head_id == bid) {
break;
}
signed_block_ptr b = cc.fetch_block_by_id(bid);
if ( b ) {
bid = b->previous;
......@@ -886,7 +905,7 @@ namespace eosio {
}
size_t count = 0;
if (!bstack.empty()) {
if (bstack.back()->previous == lib_id) {
if (bstack.back()->previous == lib_id || bstack.back()->previous == remote_head_id) {
count = bstack.size();
while (bstack.size()) {
enqueue(*bstack.back());
......@@ -1537,7 +1556,7 @@ namespace eosio {
else {
c->last_handshake_recv.last_irreversible_block_num = msg.known_trx.pending;
reset_lib_num (c);
start_sync(c, msg.known_blocks.pending);
start_sync(c, msg.known_trx.pending);
}
}
......
......@@ -3,3 +3,4 @@ add_subdirectory( cleos )
add_subdirectory( keosd )
add_subdirectory( eosio-launcher )
add_subdirectory( eosio-abigen )
add_subdirectory( eosio-blocklog )
......@@ -22,6 +22,7 @@
#include <eosio/chain/exceptions.hpp>
#include <eosio/http_plugin/http_plugin.hpp>
#include <eosio/chain_plugin/chain_plugin.hpp>
#include <boost/asio/ssl/rfc2818_verification.hpp>
#include "httpc.hpp"
using boost::asio::ip::tcp;
......@@ -234,9 +235,10 @@ namespace eosio { namespace client { namespace http {
boost::asio::ssl::stream<boost::asio::ip::tcp::socket> socket(cp.context->ios, ssl_context);
SSL_set_tlsext_host_name(socket.native_handle(), url.server.c_str());
if(cp.verify_cert)
if(cp.verify_cert) {
socket.set_verify_mode(boost::asio::ssl::verify_peer);
socket.set_verify_callback(boost::asio::ssl::rfc2818_verification(url.server));
}
do_connect(socket.next_layer(), url);
socket.handshake(boost::asio::ssl::stream_base::client);
re = do_txrx(socket, request, status_code);
......
......@@ -91,7 +91,9 @@ namespace eosio { namespace client { namespace http {
const string get_table_func = chain_func_base + "/get_table_rows";
const string get_table_by_scope_func = chain_func_base + "/get_table_by_scope";
const string get_code_func = chain_func_base + "/get_code";
const string get_code_hash_func = chain_func_base + "/get_code_hash";
const string get_abi_func = chain_func_base + "/get_abi";
const string get_raw_abi_func = chain_func_base + "/get_raw_abi";
const string get_raw_code_and_abi_func = chain_func_base + "/get_raw_code_and_abi";
const string get_currency_balance_func = chain_func_base + "/get_currency_balance";
const string get_currency_stats_func = chain_func_base + "/get_currency_stats";
......
......@@ -2295,15 +2295,18 @@ int main( int argc, char** argv ) {
string abiPath;
bool shouldSend = true;
bool contract_clear = false;
bool suppress_duplicate_check = false;
auto codeSubcommand = setSubcommand->add_subcommand("code", localized("Create or update the code on an account"));
codeSubcommand->add_option("account", account, localized("The account to set code for"))->required();
codeSubcommand->add_option("code-file", wasmPath, localized("The fullpath containing the contract WASM"));//->required();
codeSubcommand->add_flag( "-c,--clear", contract_clear, localized("Remove code on an account"));
codeSubcommand->add_flag( "--suppress-duplicate-check", suppress_duplicate_check, localized("Don't check for duplicate"));
auto abiSubcommand = setSubcommand->add_subcommand("abi", localized("Create or update the abi on an account"));
abiSubcommand->add_option("account", account, localized("The account to set the ABI for"))->required();
abiSubcommand->add_option("abi-file", abiPath, localized("The fullpath containing the contract ABI"));//->required();
abiSubcommand->add_flag( "-c,--clear", contract_clear, localized("Remove abi on an account"));
abiSubcommand->add_flag( "--suppress-duplicate-check", suppress_duplicate_check, localized("Don't check for duplicate"));
auto contractSubcommand = setSubcommand->add_subcommand("contract", localized("Create or update the contract on an account"));
contractSubcommand->add_option("account", account, localized("The account to publish a contract for"))
......@@ -2315,9 +2318,24 @@ int main( int argc, char** argv ) {
auto abi = contractSubcommand->add_option("abi-file,-a,--abi", abiPath, localized("The ABI for the contract relative to contract-dir"));
// ->check(CLI::ExistingFile);
contractSubcommand->add_flag( "-c,--clear", contract_clear, localized("Rmove contract on an account"));
contractSubcommand->add_flag( "--suppress-duplicate-check", suppress_duplicate_check, localized("Don't check for duplicate"));
std::vector<chain::action> actions;
auto set_code_callback = [&]() {
std::vector<char> old_wasm;
bool duplicate = false;
fc::sha256 old_hash, new_hash;
if (!suppress_duplicate_check) {
try {
const auto result = call(get_code_hash_func, fc::mutable_variant_object("account_name", account));
old_hash = fc::sha256(result["code_hash"].as_string());
} catch (...) {
std::cerr << "Failed to get existing code hash, continue without duplicate check..." << std::endl;
suppress_duplicate_check = true;
}
}
bytes code_bytes;
if(!contract_clear){
std::string wasm;
......@@ -2338,20 +2356,42 @@ int main( int argc, char** argv ) {
if(wasm.compare(0, 8, binary_wasm_header))
std::cerr << localized("WARNING: ") << wasmPath << localized(" doesn't look like a binary WASM file. Is it something else, like WAST? Trying anyways...") << std::endl;
code_bytes = bytes(wasm.begin(), wasm.end());
} else {
code_bytes = bytes();
}
if (!suppress_duplicate_check) {
if (code_bytes.size()) {
new_hash = fc::sha256::hash(&(code_bytes[0]), code_bytes.size());
}
duplicate = (old_hash == new_hash);
}
actions.emplace_back( create_setcode(account, code_bytes ) );
if ( shouldSend ) {
std::cerr << localized("Setting Code...") << std::endl;
send_actions(std::move(actions), 10000, packed_transaction::zlib);
if (!duplicate) {
actions.emplace_back( create_setcode(account, code_bytes ) );
if ( shouldSend ) {
std::cerr << localized("Setting Code...") << std::endl;
send_actions(std::move(actions), 10000, packed_transaction::zlib);
}
} else {
std::cout << "Skipping set code because the new code is the same as the existing code" << std::endl;
}
};
auto set_abi_callback = [&]() {
bytes old_abi;
bool duplicate = false;
if (!suppress_duplicate_check) {
try {
const auto result = call(get_raw_abi_func, fc::mutable_variant_object("account_name", account));
old_abi = result["abi"].as_blob().data;
} catch (...) {
std::cerr << "Failed to get existing raw abi, continue without duplicate check..." << std::endl;
suppress_duplicate_check = true;
}
}
bytes abi_bytes;
if(!contract_clear){
fc::path cpath(contractPath);
......@@ -2366,17 +2406,24 @@ int main( int argc, char** argv ) {
EOS_ASSERT( fc::exists( abiPath ), abi_file_not_found, "no abi file found ${f}", ("f", abiPath) );
abi_bytes = fc::raw::pack(fc::json::from_file(abiPath).as<abi_def>());
} else {
abi_bytes = bytes();
}
try {
actions.emplace_back( create_setabi(account, abi_bytes) );
} EOS_RETHROW_EXCEPTIONS(abi_type_exception, "Fail to parse ABI JSON")
if ( shouldSend ) {
std::cerr << localized("Setting ABI...") << std::endl;
send_actions(std::move(actions), 10000, packed_transaction::zlib);
if (!suppress_duplicate_check) {
duplicate = (old_abi.size() == abi_bytes.size() && std::equal(old_abi.begin(), old_abi.end(), abi_bytes.begin()));
}
if (!duplicate) {
try {
actions.emplace_back( create_setabi(account, abi_bytes) );
} EOS_RETHROW_EXCEPTIONS(abi_type_exception, "Fail to parse ABI JSON")
if ( shouldSend ) {
std::cerr << localized("Setting ABI...") << std::endl;
send_actions(std::move(actions), 10000, packed_transaction::zlib);
}
} else {
std::cout << "Skipping set abi because the new abi is the same as the existing abi" << std::endl;
}
};
......@@ -2388,8 +2435,12 @@ int main( int argc, char** argv ) {
shouldSend = false;
set_code_callback();
set_abi_callback();
std::cerr << localized("Publishing contract...") << std::endl;
send_actions(std::move(actions), 10000, packed_transaction::zlib);
if (actions.size()) {
std::cerr << localized("Publishing contract...") << std::endl;
send_actions(std::move(actions), 10000, packed_transaction::zlib);
} else {
std::cout << "no transaction is sent" << std::endl;
}
});
codeSubcommand->set_callback(set_code_callback);
abiSubcommand->set_callback(set_abi_callback);
......@@ -3075,6 +3126,7 @@ int main( int argc, char** argv ) {
if (verbose_errors) {
elog("connect error: ${e}", ("e", e.to_detail_string()));
}
return 1;
} catch (const fc::exception& e) {
// attempt to extract the error code if one is present
if (!print_recognized_errors(e, verbose_errors)) {
......
# eosio-blocklog: standalone utility that reads a node's block log (and the
# reversible-block database) and dumps the blocks as JSON.
add_executable( eosio-blocklog main.cpp )

# NOTE(review): other eosio programs define rt_library on non-Apple UNIX and
# link it (librt supplies POSIX realtime APIs on older glibc) — presumably the
# same is intended here; previously the variable was set but never used.
if( UNIX AND NOT APPLE )
  set(rt_library rt )
endif()

# Optionally build against TCMalloc when gperftools is present (QUIET because
# the dependency is purely optional).
find_package( Gperftools QUIET )
if( GPERFTOOLS_FOUND )
  message( STATUS "Found gperftools; compiling eosio-blocklog with TCMalloc")
  list( APPEND PLATFORM_SPECIFIC_LIBS tcmalloc )
endif()

target_include_directories(eosio-blocklog PUBLIC ${CMAKE_CURRENT_BINARY_DIR})

# Fix: include ${rt_library} in the link line (it expands to nothing on
# platforms where it was not set, so this is a no-op there).
target_link_libraries( eosio-blocklog
        PRIVATE appbase
        PRIVATE eosio_chain fc ${rt_library} ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} )

install( TARGETS
   eosio-blocklog
   RUNTIME DESTINATION ${CMAKE_INSTALL_FULL_BINDIR}
   LIBRARY DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}
   ARCHIVE DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}
)
/**
* @file
* @copyright defined in eosio/LICENSE.txt
*/
#include <eosio/chain/abi_serializer.hpp>
#include <eosio/chain/block_log.hpp>
#include <eosio/chain/config.hpp>
#include <eosio/chain/reversible_block_object.hpp>
#include <fc/io/json.hpp>
#include <fc/filesystem.hpp>
#include <fc/variant.hpp>
#include <boost/exception/diagnostic_information.hpp>
#include <boost/program_options.hpp>
#include <boost/filesystem.hpp>
#include <boost/filesystem/path.hpp>
using namespace eosio::chain;
namespace bfs = boost::filesystem;
namespace bpo = boost::program_options;
using bpo::options_description;
using bpo::variables_map;
// Command-line driver state for eosio-blocklog: holds the parsed options and
// performs the block-log -> JSON dump.  Members are normally filled in by
// set_program_options()/initialize(); the default member initializers below
// mirror the program_options defaults so the object never carries
// uninitialized values (previously first_block/last_block/no_pretty_print
// were left indeterminate by the empty constructor).
struct blocklog {
   blocklog()
   {}

   // Dump blocks [first_block, last_block] from the log (and reversible db) as JSON.
   void read_log();
   // Register the command-line options this tool understands.
   void set_program_options(options_description& cli);
   // Resolve path options from the parsed command line (relative paths made absolute).
   void initialize(const variables_map& options);

   bfs::path blocks_dir;                                         // directory containing the block log
   bfs::path output_file;                                        // empty => write JSON to stdout
   uint32_t  first_block = 1;                                    // first block number to print
   uint32_t  last_block = std::numeric_limits<uint32_t>::max();  // last block number (inclusive)
   bool      no_pretty_print = false;                            // true => compact single-line JSON
};
// Dump blocks to the selected output stream as JSON: first every block found
// in the block log, then (when available) blocks from the reversible-block
// database, stopping once last_block is passed or no further block exists.
// Assumes blocks_dir/output_file/first_block/last_block/no_pretty_print have
// been populated (see set_program_options()/initialize()).
void blocklog::read_log() {
block_log block_logger(blocks_dir);
const auto end = block_logger.read_head();  // newest block stored in the log
EOS_ASSERT( end, block_log_exception, "No blocks found in block log" );
EOS_ASSERT( end->block_num() > 1, block_log_exception, "Only one block found in block log" );
ilog( "existing block log contains block num 1 through block num ${n}", ("n",end->block_num()) );
// Try to open the reversible-block database read-only so blocks beyond the
// block log head can be printed too; on any handled failure we fall back to
// the block log alone (reversible_blocks is left empty).
optional<chainbase::database> reversible_blocks;
try {
reversible_blocks.emplace(blocks_dir / config::reversible_blocks_dir_name, chainbase::database::read_only, config::default_reversible_cache_size);
reversible_blocks->add_index<reversible_block_index>();
const auto& idx = reversible_blocks->get_index<reversible_block_index,by_num>();
auto first = idx.lower_bound(end->block_num());  // first reversible block at/after the log head
auto last = idx.rbegin();                        // newest reversible block
if (first != idx.end() && last != idx.rend())
ilog( "existing reversible block num ${first} through block num ${last} ", ("first",first->get_block()->block_num())("last",last->get_block()->block_num()) );
else {
elog( "no blocks available in reversible block database: only block_log blocks are available" );
reversible_blocks.reset();
}
} catch( const std::runtime_error& e ) {
// Dirty-flag conditions are recognized by comparing the exception message
// verbatim (likely after an unclean shutdown); anything else is rethrown.
if( std::string(e.what()) == "database dirty flag set" ) {
elog( "database dirty flag set (likely due to unclean shutdown): only block_log blocks are available" );
} else if( std::string(e.what()) == "database metadata dirty flag set" ) {
elog( "database metadata dirty flag set (likely due to unclean shutdown): only block_log blocks are available" );
} else {
throw;
}
}
// Select the output sink: the requested file, or stdout when none was given.
std::ofstream output_blocks;
std::ostream* out;
if (!output_file.empty()) {
output_blocks.open(output_file.generic_string().c_str());
if (output_blocks.fail()) {
std::ostringstream ss;
ss << "Unable to open file '" << output_file.string() << "'";
throw std::runtime_error(ss.str());
}
out = &output_blocks;
}
else
out = &std::cout;
uint32_t block_num = (first_block < 1) ? 1 : first_block;  // block numbering starts at 1
signed_block_ptr next;
fc::variant pretty_output;
const fc::microseconds deadline = fc::seconds(10);  // per-block serialization time budget
// Serialize one block to JSON, prefixed with block_num/id/ref_block_prefix.
auto print_block = [&](signed_block_ptr& next) {
abi_serializer::to_variant(*next,
pretty_output,
[]( account_name n ) { return optional<abi_serializer>(); },  // no ABIs resolved here, so action data stays unexpanded
deadline);
const auto block_id = next->id();
const uint32_t ref_block_prefix = block_id._hash[1];
const auto enhanced_object = fc::mutable_variant_object
("block_num",next->block_num())
("id", block_id)
("ref_block_prefix", ref_block_prefix)
(pretty_output.get_object());
fc::variant v(std::move(enhanced_object));
if (no_pretty_print)
fc::json::to_stream(*out, v, fc::json::stringify_large_ints_and_doubles);
else
*out << fc::json::to_pretty_string(v) << "\n";
};
// Pass 1: blocks straight from the block log, flushing after each block.
while((block_num <= last_block) && (next = block_logger.read_block_by_num( block_num ))) {
print_block(next);
++block_num;
out->flush();
}
// Pass 2: continue into the reversible database, if it was opened above.
if (!reversible_blocks) {
return;
}
const reversible_block_object* obj = nullptr;
while( (block_num <= last_block) && (obj = reversible_blocks->find<reversible_block_object,by_num>(block_num)) ) {
auto next = obj->get_block();
print_block(next);
++block_num;
}
}
// Register eosio-blocklog's command-line options on the supplied description.
// "first", "last" and "no-pretty-print" are bound directly to members, so
// bpo::notify() fills them in; the path options are read later in initialize().
void blocklog::set_program_options(options_description& cli)
{
   auto opts = cli.add_options();
   opts("blocks-dir", bpo::value<bfs::path>()->default_value("blocks"),
        "the location of the blocks directory (absolute path or relative to the current directory)");
   opts("output-file,o", bpo::value<bfs::path>(),
        "the file to write the block log output to (absolute or relative path). If not specified then output is to stdout.");
   opts("first", bpo::value<uint32_t>(&first_block)->default_value(1),
        "the first block number to log");
   opts("last", bpo::value<uint32_t>(&last_block)->default_value(std::numeric_limits<uint32_t>::max()),
        "the last block number (inclusive) to log");
   opts("no-pretty-print", bpo::bool_switch(&no_pretty_print)->default_value(false),
        "Do not pretty print the output. Useful if piping to jq to improve performance.");
   opts("help", "Print this help message and exit.");
}
void blocklog::initialize(const variables_map& options) {
try {
auto bld = options.at( "blocks-dir" ).as<bfs::path>();
if( bld.is_relative())
blocks_dir = bfs::current_path() / bld;
else
blocks_dir = bld;
if (options.count( "output-file" )) {
bld = options.at( "output-file" ).as<bfs::path>();
if( bld.is_relative())
output_file = bfs::current_path() / bld;
else
output_file = bld;
}
} FC_LOG_AND_RETHROW()
}
int main(int argc, char** argv)
{
std::ios::sync_with_stdio(false); // for potential performance boost for large block log files
options_description cli ("eosio-blocklog command line options");
try {
blocklog blog;
blog.set_program_options(cli);
variables_map vmap;
bpo::store(bpo::parse_command_line(argc, argv, cli), vmap);
bpo::notify(vmap);
if (vmap.count("help") > 0) {
cli.print(std::cerr);
return 0;
}
blog.initialize(vmap);
blog.read_log();
} catch( const fc::exception& e ) {
elog( "${e}", ("e", e.to_detail_string()));
return -1;
} catch( const boost::exception& e ) {
elog("${e}", ("e",boost::diagnostic_information(e)));
return -1;
} catch( const std::exception& e ) {
elog("${e}", ("e",e.what()));
return -1;
} catch( ... ) {
elog("unknown exception");
return -1;
}
return 0;
}
......@@ -426,6 +426,7 @@ struct launcher_def {
string start_temp;
string start_script;
fc::optional<uint32_t> max_block_cpu_usage;
fc::optional<uint32_t> max_transaction_cpu_usage;
eosio::chain::genesis_state genesis_from_file;
void assign_name (eosd_def &node, bool is_bios);
......@@ -500,7 +501,8 @@ launcher_def::set_options (bpo::options_description &cfg) {
("gelf-endpoint",bpo::value<string>(&gelf_endpoint)->default_value("10.160.11.21:12201"),"hostname:port or ip:port of GELF endpoint")
("template",bpo::value<string>(&start_temp)->default_value("testnet.template"),"the startup script template")
("script",bpo::value<string>(&start_script)->default_value("bios_boot.sh"),"the generated startup script name")
("max-block-cpu-usage",bpo::value<uint32_t>(),"")
("max-block-cpu-usage",bpo::value<uint32_t>(),"Provide the \"max-block-cpu-usage\" value to use in the genesis.json file")
("max-transaction-cpu-usage",bpo::value<uint32_t>(),"Provide the \"max-transaction-cpu-usage\" value to use in the genesis.json file")
;
}
......@@ -536,6 +538,10 @@ launcher_def::initialize (const variables_map &vmap) {
max_block_cpu_usage = vmap["max-block-cpu-usage"].as<uint32_t>();
}
if (vmap.count("max-transaction-cpu-usage")) {
max_transaction_cpu_usage = vmap["max-transaction-cpu-usage"].as<uint32_t>();
}
genesis = vmap["genesis"].as<string>();
if (vmap.count("host-map")) {
host_map_file = vmap["host-map"].as<string>();
......@@ -1184,6 +1190,8 @@ launcher_def::init_genesis () {
genesis_from_file.initial_key = public_key_type(bioskey);
if (max_block_cpu_usage)
genesis_from_file.initial_configuration.max_block_cpu_usage = *max_block_cpu_usage;
if (max_transaction_cpu_usage)
genesis_from_file.initial_configuration.max_transaction_cpu_usage = *max_transaction_cpu_usage;
}
void
......
......@@ -23,7 +23,7 @@
exit 1
fi
if [ "${OS_VER}" -lt 2017 ]; then
if [[ "${OS_NAME}" == "Amazon Linux AMI" && "${OS_VER}" -lt 2017 ]]; then
printf "\\tYou must be running Amazon Linux 2017.09 or higher to install EOSIO.\\n"
printf "\\texiting now.\\n"
exit 1
......@@ -53,9 +53,15 @@
fi
printf "\\t%s\\n" "${UPDATE}"
DEP_ARRAY=( git gcc72.x86_64 gcc72-c++.x86_64 autoconf automake libtool make bzip2 \
bzip2-devel.x86_64 openssl-devel.x86_64 gmp-devel.x86_64 libstdc++72.x86_64 \
python27.x86_64 python36-devel.x86_64 libedit-devel.x86_64 doxygen.x86_64 graphviz.x86_64)
if [[ "${OS_NAME}" == "Amazon Linux AMI" ]]; then
DEP_ARRAY=( git gcc72.x86_64 gcc72-c++.x86_64 autoconf automake libtool make bzip2 \
bzip2-devel.x86_64 openssl-devel.x86_64 gmp-devel.x86_64 libstdc++72.x86_64 \
python27.x86_64 python36-devel.x86_64 libedit-devel.x86_64 doxygen.x86_64 graphviz.x86_64)
else
DEP_ARRAY=( git gcc.x86_64 gcc-c++.x86_64 autoconf automake libtool make bzip2 \
bzip2-devel.x86_64 openssl-devel.x86_64 gmp-devel.x86_64 libstdc++.x86_64 \
python3.x86_64 python3-devel.x86_64 libedit-devel.x86_64 doxygen.x86_64 graphviz.x86_64)
fi
COUNT=1
DISPLAY=""
DEP=""
......
This diff is collapsed.
......@@ -81,20 +81,24 @@ add_test(NAME nodeos_sanity_lr_test COMMAND tests/nodeos_run_test.py -v --sanity
set_property(TEST nodeos_sanity_lr_test PROPERTY LABELS long_running_tests)
add_test(NAME bnet_nodeos_sanity_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
set_property(TEST bnet_nodeos_sanity_lr_test PROPERTY LABELS long_running_tests)
add_test(NAME nodeos_run_check_lr_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
set_property(TEST nodeos_run_check_lr_test PROPERTY LABELS long_running_tests)
add_test(NAME nodeos_run_check2_lr_test COMMAND tests/nodeos_run_test.py -v --wallet-port 9900 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
set_property(TEST nodeos_run_check2_lr_test PROPERTY LABELS long_running_tests)
#add_test(NAME distributed_transactions_lr_test COMMAND tests/distributed-transactions-test.py -d 2 -p 21 -n 21 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
#set_property(TEST distributed_transactions_lr_test PROPERTY LABELS long_running_tests)
add_test(NAME nodeos_forked_chain_lr_test COMMAND tests/nodeos_forked_chain_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME nodeos_forked_chain_lr_test COMMAND tests/nodeos_forked_chain_test.py -v --wallet-port 9901 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
set_property(TEST nodeos_forked_chain_lr_test PROPERTY LABELS long_running_tests)
add_test(NAME nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9902 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
set_property(TEST nodeos_voting_lr_test PROPERTY LABELS long_running_tests)
add_test(NAME bnet_nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME bnet_nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9903 --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
set_property(TEST bnet_nodeos_voting_lr_test PROPERTY LABELS long_running_tests)
add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --wallet-port 9904 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests)
......
......@@ -172,7 +172,9 @@ class Cluster(object):
cmdArr.append(arg)
cmdArr.append("--max-block-cpu-usage")
cmdArr.append(str(2000000))
cmdArr.append(str(160000000))
cmdArr.append("--max-transaction-cpu-usage")
cmdArr.append(str(150000000))
# must be last cmdArr.append before subprocess.call, so that everything is on the command line
# before constructing the shape.json file for "bridge"
......@@ -1274,6 +1276,8 @@ class Cluster(object):
for i in range(0, len(self.nodes)):
fileName="etc/eosio/node_%02d/config.ini" % (i)
Cluster.dumpErrorDetailImpl(fileName)
fileName="etc/eosio/node_%02d/genesis.json" % (i)
Cluster.dumpErrorDetailImpl(fileName)
fileName="var/lib/node_%02d/stderr.txt" % (i)
Cluster.dumpErrorDetailImpl(fileName)
......
......@@ -26,6 +26,7 @@ class AppArgs:
class TestHelper(object):
LOCAL_HOST="localhost"
DEFAULT_PORT=8888
DEFAULT_WALLET_PORT=9899
@staticmethod
# pylint: disable=too-many-branches
......@@ -70,6 +71,12 @@ class TestHelper(object):
if "--port" in includeArgs:
parser.add_argument("-p", "--port", type=int, help="%s host port" % Utils.EosServerName,
default=TestHelper.DEFAULT_PORT)
if "--wallet-host" in includeArgs:
parser.add_argument("--wallet-host", type=str, help="%s host" % Utils.EosWalletName,
default=TestHelper.LOCAL_HOST)
if "--wallet-port" in includeArgs:
parser.add_argument("--wallet-port", type=int, help="%s port" % Utils.EosWalletName,
default=TestHelper.DEFAULT_WALLET_PORT)
if "--prod-count" in includeArgs:
parser.add_argument("-c", "--prod-count", type=int, help="Per node producer count", default=1)
if "--defproducera_prvt_key" in includeArgs:
......
......@@ -12,7 +12,8 @@ from testUtils import Utils
Wallet=namedtuple("Wallet", "name password host port")
# pylint: disable=too-many-instance-attributes
class WalletMgr(object):
__walletLogFile="test_keosd_output.log"
__walletLogOutFile="test_keosd_out.log"
__walletLogErrFile="test_keosd_err.log"
__walletDataDir="test_wallet_0"
__MaxPort=9999
......@@ -64,16 +65,12 @@ class WalletMgr(object):
if self.isLocal():
self.port=self.findAvailablePort()
pgrepCmd=Utils.pgrepCmd(Utils.EosWalletName)
if Utils.Debug:
portStatus="N/A"
portTaken=False
if self.isLocal():
if Utils.arePortsAvailable(self.port):
portStatus="AVAILABLE"
if not Utils.arePortsAvailable(self.port):
portTaken=True
else:
portStatus="NOT AVAILABLE"
pgrepCmd=Utils.pgrepCmd(Utils.EosWalletName)
psOut=Utils.checkOutput(pgrepCmd.split(), ignoreError=True)
if psOut or portTaken:
statusMsg=""
......@@ -86,7 +83,7 @@ class WalletMgr(object):
cmd="%s --data-dir %s --config-dir %s --http-server-address=%s:%d --verbose-http-errors" % (
Utils.EosWalletPath, WalletMgr.__walletDataDir, WalletMgr.__walletDataDir, self.host, self.port)
if Utils.Debug: Utils.Print("cmd: %s" % (cmd))
with open(WalletMgr.__walletLogFile, 'w') as sout, open(WalletMgr.__walletLogFile, 'w') as serr:
with open(WalletMgr.__walletLogOutFile, 'w') as sout, open(WalletMgr.__walletLogErrFile, 'w') as serr:
popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr)
self.__walletPid=popen.pid
......@@ -94,10 +91,11 @@ class WalletMgr(object):
time.sleep(2)
try:
if Utils.Debug: Utils.Print("Checking if %s launched. %s" % (Utils.EosWalletName, pgrepCmd))
psOut=Utils.checkOutput(pgrepCmd.split())
if Utils.Debug: Utils.Print("Launched %s. %s - {%s}" % (Utils.EosWalletName, pgrepCmd, psOut))
if Utils.Debug: Utils.Print("Launched %s. {%s}" % (Utils.EosWalletName, psOut))
except subprocess.CalledProcessError as ex:
Utils.errorExit("Failed to launch the wallet manager on")
Utils.errorExit("Failed to launch the wallet manager")
return True
......@@ -276,9 +274,13 @@ class WalletMgr(object):
def dumpErrorDetails(self):
Utils.Print("=================================================================")
if self.__walletPid is not None:
Utils.Print("Contents of %s:" % (WalletMgr.__walletLogFile))
Utils.Print("Contents of %s:" % (WalletMgr.__walletLogOutFile))
Utils.Print("=================================================================")
with open(WalletMgr.__walletLogOutFile, "r") as f:
shutil.copyfileobj(f, sys.stdout)
Utils.Print("Contents of %s:" % (WalletMgr.__walletLogErrFile))
Utils.Print("=================================================================")
with open(WalletMgr.__walletLogFile, "r") as f:
with open(WalletMgr.__walletLogErrFile, "r") as f:
shutil.copyfileobj(f, sys.stdout)
def killall(self, allInstances=False):
......
......@@ -102,7 +102,8 @@ def getMinHeadAndLib(prodNodes):
args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--p2p-plugin"})
args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run",
"--p2p-plugin","--wallet-port"})
Utils.Debug=args.v
totalProducerNodes=2
totalNonProducerNodes=1
......@@ -116,8 +117,9 @@ dontKill=args.leave_running
prodCount=args.prod_count
killAll=args.clean_run
p2pPlugin=args.p2p_plugin
walletPort=args.wallet_port
walletMgr=WalletMgr(True)
walletMgr=WalletMgr(True, port=walletPort)
testSuccessful=False
killEosInstances=not dontKill
killWallet=not dontKill
......
......@@ -22,7 +22,7 @@ from core_symbol import CORE_SYMBOL
args = TestHelper.parse_args({"--host","--port","--prod-count","--defproducera_prvt_key","--defproducerb_prvt_key","--mongodb"
,"--dump-error-details","--dont-launch","--keep-logs","-v","--leave-running","--only-bios","--clean-run"
,"--sanity-test","--p2p-plugin"})
,"--sanity-test","--p2p-plugin","--wallet-port"})
server=args.host
port=args.port
debug=args.v
......@@ -38,15 +38,16 @@ onlyBios=args.only_bios
killAll=args.clean_run
sanityTest=args.sanity_test
p2pPlugin=args.p2p_plugin
walletPort=args.wallet_port
Utils.Debug=debug
localTest=True if server == TestHelper.LOCAL_HOST else False
cluster=Cluster(walletd=True, enableMongo=enableMongo, defproduceraPrvtKey=defproduceraPrvtKey, defproducerbPrvtKey=defproducerbPrvtKey)
walletMgr=WalletMgr(True)
walletMgr=WalletMgr(True, port=walletPort)
testSuccessful=False
killEosInstances=not dontKill
killWallet=not dontKill
dontBootstrap=sanityTest
dontBootstrap=sanityTest # intent is to limit the scope of the sanity test to just verifying that nodes can be started
WalletdName=Utils.EosWalletName
ClientName="cleos"
......
......@@ -54,7 +54,7 @@ class NamedAccounts:
# --keep-logs <Don't delete var/lib/node_* folders upon test completion>
###############################################################
args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--clean-run"})
args = TestHelper.parse_args({"--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--wallet-port"})
Utils.Debug=args.v
totalNodes=4
cluster=Cluster(walletd=True)
......@@ -62,8 +62,9 @@ dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
dontKill=args.leave_running
killAll=args.clean_run
walletPort=args.wallet_port
walletMgr=WalletMgr(True)
walletMgr=WalletMgr(True, port=walletPort)
testSuccessful=False
killEosInstances=not dontKill
killWallet=not dontKill
......
......@@ -140,7 +140,8 @@ errorExit=Utils.errorExit
from core_symbol import CORE_SYMBOL
args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run","--p2p-plugin"})
args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run",
"--p2p-plugin","--wallet-port"})
Utils.Debug=args.v
totalNodes=4
cluster=Cluster(walletd=True)
......@@ -150,8 +151,9 @@ dontKill=args.leave_running
prodCount=args.prod_count
killAll=args.clean_run
p2pPlugin=args.p2p_plugin
walletPort=args.wallet_port
walletMgr=WalletMgr(True)
walletMgr=WalletMgr(True, port=walletPort)
testSuccessful=False
killEosInstances=not dontKill
killWallet=not dontKill
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册