Commit 50f6f40b authored by arhag

Merge branch 'develop' into 5589-fix-get-transaction-bug

......@@ -27,7 +27,7 @@ set( CXX_STANDARD_REQUIRED ON)
set(VERSION_MAJOR 1)
set(VERSION_MINOR 2)
set(VERSION_PATCH 3)
set(VERSION_PATCH 5)
set( CLI_CLIENT_EXECUTABLE_NAME cleos )
set( NODE_EXECUTABLE_NAME nodeos )
......
......@@ -20,10 +20,10 @@ cd eos/Docker
docker build . -t eosio/eos
```
The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.2.3 tag, you could do the following:
The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.2.5 tag, you could do the following:
```bash
docker build -t eosio/eos:v1.2.3 --build-arg branch=v1.2.3 .
docker build -t eosio/eos:v1.2.5 --build-arg branch=v1.2.5 .
```
By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image.
......
......@@ -50,23 +50,9 @@ RUN git clone --depth 1 --single-branch --branch release_40 https://github.com/l
&& cmake --build build --target install \
&& cd .. && rm -rf llvm
RUN wget https://github.com/WebAssembly/binaryen/archive/1.37.21.tar.gz -O - | tar -xz \
&& cd binaryen-1.37.21 \
&& cmake -H. -Bbuild -GNinja -DCMAKE_BUILD_TYPE=Release \
&& cmake --build build --target install \
&& cd .. && rm -rf binaryen-1.37.21
RUN git clone --depth 1 -b releases/v3.3 https://github.com/mongodb/mongo-cxx-driver \
&& cd mongo-cxx-driver/build \
&& cmake -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local .. \
&& make -j$(nproc) \
&& make install \
&& cd ../../ && rm -rf mongo-cxx-driver
RUN git clone --depth 1 --single-branch --branch master https://github.com/ucb-bar/berkeley-softfloat-3.git \
&& cd berkeley-softfloat-3/build/Linux-x86_64-GCC \
&& make -j${nproc} SPECIALIZE_TYPE="8086-SSE" SOFTFLOAT_OPS="-DSOFTFLOAT_ROUND_EVEN -DINLINE_LEVEL=5 -DSOFTFLOAT_FAST_DIV32TO16 -DSOFTFLOAT_FAST_DIV64TO32" \
&& mkdir -p /opt/berkeley-softfloat-3 && cp softfloat.a /opt/berkeley-softfloat-3/libsoftfloat.a \
&& mv ../../source/include /opt/berkeley-softfloat-3/include && cd - && rm -rf berkeley-softfloat-3
ENV SOFTFLOAT_ROOT /opt/berkeley-softfloat-3
......@@ -29,7 +29,11 @@
#
# https://github.com/EOSIO/eos/blob/master/LICENSE.txt
##########################################################################
if [ "$(id -u)" -ne 0 ]; then
printf "\n\tThis requires sudo. Please run with sudo.\n\n"
exit -1
fi
CWD="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
if [ "${CWD}" != "${PWD}" ]; then
......
......@@ -26,9 +26,14 @@ if [ -d "/usr/local/eosio" ]; then
for binary in ${binaries[@]}; do
rm ${binary}
done
# Handle cleanup of directories created from installation
if [ "$1" == "--full" ]; then
if [ -d ~/Library/Application\ Support/eosio ]; then rm -rf ~/Library/Application\ Support/eosio; fi # Mac OS
if [ -d ~/.local/share/eosio ]; then rm -rf ~/.local/share/eosio; fi # Linux
fi
popd &> /dev/null
break;;
[Nn]* )
printf "\tAborting uninstall\n\n"
exit -1;;
esac
......
......@@ -1768,6 +1768,10 @@ void controller::drop_unapplied_transaction(const transaction_metadata_ptr& trx)
my->unapplied_transactions.erase(trx->signed_id);
}
void controller::drop_all_unapplied_transactions() {
my->unapplied_transactions.clear();
}
vector<transaction_id_type> controller::get_scheduled_transactions() const {
const auto& idx = db().get_index<generated_transaction_multi_index,by_delay>();
......
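For context, here is a minimal sketch of the cache pattern behind the two calls above (drop one entry by signed id vs. clear the whole set). The container type and `signed_id` representation are simplified stand-ins, not the controller's actual internals:
```cpp
#include <map>
#include <memory>
#include <string>

struct transaction_metadata {
    std::string signed_id;   // stand-in for the real signed_id digest
};
using transaction_metadata_ptr = std::shared_ptr<transaction_metadata>;

class unapplied_cache {
    std::map<std::string, transaction_metadata_ptr> unapplied_transactions;
public:
    // mirrors controller::drop_unapplied_transaction: erase one entry by its signed id
    void drop(const transaction_metadata_ptr& trx) { unapplied_transactions.erase(trx->signed_id); }
    // mirrors the newly added controller::drop_all_unapplied_transactions: clear everything at once
    void drop_all() { unapplied_transactions.clear(); }
};
```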
......@@ -288,21 +288,25 @@ namespace impl {
mvo("name", act.name);
mvo("authorization", act.authorization);
auto abi = resolver(act.account);
if (abi.valid()) {
auto type = abi->get_action_type(act.name);
if (!type.empty()) {
try {
mvo( "data", abi->_binary_to_variant( type, act.data, recursion_depth, deadline, max_serialization_time ));
mvo("hex_data", act.data);
} catch(...) {
// on any failure to serialize the data, leave it as not serialized
try {
auto abi = resolver(act.account);
if (abi.valid()) {
auto type = abi->get_action_type(act.name);
if (!type.empty()) {
try {
mvo( "data", abi->_binary_to_variant( type, act.data, recursion_depth, deadline, max_serialization_time ));
mvo("hex_data", act.data);
} catch(...) {
// on any failure to serialize the data, leave it as not serialized
mvo("data", act.data);
}
} else {
mvo("data", act.data);
}
} else {
mvo("data", act.data);
}
} else {
} catch(...) {
mvo("data", act.data);
}
out(name, std::move(mvo));
......
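The rewrite above widens the try/catch so that a failure while resolving the ABI itself (not just while decoding the action data) falls back to emitting the raw bytes. A standalone sketch of that degrade-gracefully pattern follows; the helper names (`resolve_abi`, `decode`, `emit_*`) are illustrative and not part of the real abi_serializer:
```cpp
#include <functional>
#include <optional>
#include <string>
#include <vector>

using bytes = std::vector<char>;

// resolve_abi: account -> action type name (may throw, e.g. on an invalid on-chain ABI)
// decode:      (type, raw bytes) -> decoded representation (may also throw)
void serialize_action_data(const std::string& account,
                           const bytes& data,
                           const std::function<std::optional<std::string>(const std::string&)>& resolve_abi,
                           const std::function<std::string(const std::string&, const bytes&)>& decode,
                           const std::function<void(const std::string&)>& emit_decoded,
                           const std::function<void(const bytes&)>& emit_raw) {
    try {
        auto type = resolve_abi(account);
        if (type && !type->empty()) {
            emit_decoded(decode(*type, data));
            return;
        }
    } catch (...) {
        // swallow: a bad ABI must not make block serialization fail
    }
    emit_raw(data);   // fall back to the raw action bytes
}
```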
......@@ -108,6 +108,7 @@ namespace eosio { namespace chain {
*/
vector<transaction_metadata_ptr> get_unapplied_transactions() const;
void drop_unapplied_transaction(const transaction_metadata_ptr& trx);
void drop_all_unapplied_transactions();
/**
* These transaction IDs represent transactions available in the head chain state as scheduled
......
......@@ -303,7 +303,7 @@ namespace eosio {
"Track actions which match receiver:action:actor. Actor may be blank to include all. Action and Actor both blank allows all from Recieiver. Receiver may not be blank.")
;
cfg.add_options()
("filter-out,f", bpo::value<vector<string>>()->composing(),
("filter-out,F", bpo::value<vector<string>>()->composing(),
"Do not track actions which match receiver:action:actor. Action and Actor both blank excludes all from Reciever. Actor blank excludes all from reciever:action. Receiver may not be blank.")
;
}
......
......@@ -81,8 +81,6 @@ using transaction_id_with_expiry_index = multi_index_container<
>
>;
enum class pending_block_mode {
producing,
speculating
......@@ -885,6 +883,13 @@ fc::time_point producer_plugin_impl::calculate_pending_block_time() const {
return block_time;
}
enum class tx_category {
PERSISTED,
UNEXPIRED_UNPERSISTED,
EXPIRED,
};
producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool &last_block) {
chain::controller& chain = app().get_plugin<chain_plugin>().chain();
......@@ -985,71 +990,66 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool
try {
size_t orig_pending_txn_size = _pending_incoming_transactions.size();
if (!persisted_by_expiry.empty() || _pending_block_mode == pending_block_mode::producing) {
auto unapplied_trxs = chain.get_unapplied_transactions();
if (!persisted_by_expiry.empty()) {
for (auto itr = unapplied_trxs.begin(); itr != unapplied_trxs.end(); ++itr) {
const auto& trx = *itr;
if (persisted_by_id.find(trx->id) != persisted_by_id.end()) {
// this is a persisted transaction, push it into the block (even if we are speculating) with
// no deadline as it has already passed the subjective deadlines once and we want to represent
// the state of the chain including this transaction
try {
chain.push_transaction(trx, fc::time_point::maximum());
} catch ( const guard_exception& e ) {
app().get_plugin<chain_plugin>().handle_guard_exception(e);
return start_block_result::failed;
} FC_LOG_AND_DROP();
// remove it from further consideration as it is applied
*itr = nullptr;
}
}
}
if (_pending_block_mode == pending_block_mode::producing) {
for (const auto& trx : unapplied_trxs) {
if (block_time <= fc::time_point::now()) exhausted = true;
if (exhausted) {
break;
}
// Processing unapplied transactions...
//
if (_producers.empty() && persisted_by_id.empty()) {
// if this node can never produce and has no persisted transactions,
// there is no need for unapplied transactions; they can be dropped
chain.drop_all_unapplied_transactions();
} else {
std::vector<transaction_metadata_ptr> apply_trxs;
{ // derive appliable transactions from unapplied_transactions and drop droppable transactions
auto unapplied_trxs = chain.get_unapplied_transactions();
apply_trxs.reserve(unapplied_trxs.size());
if (!trx) {
// nulled in the loop above, skip it
continue;
auto calculate_transaction_category = [&](const transaction_metadata_ptr& trx) {
if (trx->packed_trx.expiration() < pbs->header.timestamp.to_time_point()) {
return tx_category::EXPIRED;
} else if (persisted_by_id.find(trx->id) != persisted_by_id.end()) {
return tx_category::PERSISTED;
} else {
return tx_category::UNEXPIRED_UNPERSISTED;
}
};
if (trx->packed_trx.expiration() < pbs->header.timestamp.to_time_point()) {
// expired, drop it
for (auto& trx: unapplied_trxs) {
auto category = calculate_transaction_category(trx);
if (category == tx_category::EXPIRED || (category == tx_category::UNEXPIRED_UNPERSISTED && _producers.empty())) {
chain.drop_unapplied_transaction(trx);
continue;
} else if (category == tx_category::PERSISTED || (category == tx_category::UNEXPIRED_UNPERSISTED && _pending_block_mode == pending_block_mode::producing)) {
apply_trxs.emplace_back(std::move(trx));
}
}
}
try {
auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms);
bool deadline_is_subjective = false;
if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && block_time < deadline)) {
deadline_is_subjective = true;
deadline = block_time;
}
for (const auto& trx: apply_trxs) {
if (block_time <= fc::time_point::now()) exhausted = true;
if (exhausted) {
break;
}
auto trace = chain.push_transaction(trx, deadline);
if (trace->except) {
if (failure_is_subjective(*trace->except, deadline_is_subjective)) {
exhausted = true;
} else {
// this failed our configured maximum transaction time, we don't want to replay it
chain.drop_unapplied_transaction(trx);
}
try {
auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms);
bool deadline_is_subjective = false;
if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && block_time < deadline)) {
deadline_is_subjective = true;
deadline = block_time;
}
auto trace = chain.push_transaction(trx, deadline);
if (trace->except) {
if (failure_is_subjective(*trace->except, deadline_is_subjective)) {
exhausted = true;
} else {
// this failed our configured maximum transaction time, we don't want to replay it
chain.drop_unapplied_transaction(trx);
}
} catch ( const guard_exception& e ) {
app().get_plugin<chain_plugin>().handle_guard_exception(e);
return start_block_result::failed;
} FC_LOG_AND_DROP();
}
}
} catch ( const guard_exception& e ) {
app().get_plugin<chain_plugin>().handle_guard_exception(e);
return start_block_result::failed;
} FC_LOG_AND_DROP();
}
}
if (_pending_block_mode == pending_block_mode::producing) {
......@@ -1119,7 +1119,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool
} else {
// attempt to apply any pending incoming transactions
_incoming_trx_weight = 0.0;
if (orig_pending_txn_size && _pending_incoming_transactions.size()) {
while (orig_pending_txn_size && _pending_incoming_transactions.size()) {
auto e = _pending_incoming_transactions.front();
_pending_incoming_transactions.pop_front();
--orig_pending_txn_size;
......
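The categorization logic above can be read in isolation as a pure function: classify each unapplied transaction, then decide whether to drop it, apply it now, or keep it for later. A simplified sketch of that rule, using stand-in id/time types rather than the real `transaction_metadata` and `fc::time_point`:
```cpp
#include <cstdint>
#include <set>

enum class tx_category { PERSISTED, UNEXPIRED_UNPERSISTED, EXPIRED };

struct tx_view {
    std::uint64_t id;          // stand-in for the transaction id
    std::int64_t  expiration;  // stand-in for packed_trx.expiration()
};

tx_category categorize(const tx_view& trx, std::int64_t pending_block_time,
                       const std::set<std::uint64_t>& persisted_by_id) {
    if (trx.expiration < pending_block_time) return tx_category::EXPIRED;
    if (persisted_by_id.count(trx.id))       return tx_category::PERSISTED;
    return tx_category::UNEXPIRED_UNPERSISTED;
}

enum class disposition { DROP, APPLY, KEEP };

disposition decide(tx_category c, bool node_produces, bool currently_producing) {
    if (c == tx_category::EXPIRED)                                      return disposition::DROP;
    if (c == tx_category::UNEXPIRED_UNPERSISTED && !node_produces)      return disposition::DROP;
    if (c == tx_category::PERSISTED)                                    return disposition::APPLY;
    if (c == tx_category::UNEXPIRED_UNPERSISTED && currently_producing) return disposition::APPLY;
    return disposition::KEEP;   // unexpired, unpersisted, and the node is only speculating
}
```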
......@@ -6,6 +6,7 @@
#include <eosio/chain/transaction.hpp>
#include <eosio/wallet_plugin/wallet_api.hpp>
#include <boost/filesystem/path.hpp>
#include <boost/interprocess/sync/file_lock.hpp>
#include <chrono>
namespace fc { class variant; }
......@@ -24,11 +25,14 @@ public:
wallet_manager(wallet_manager&&) = delete;
wallet_manager& operator=(const wallet_manager&) = delete;
wallet_manager& operator=(wallet_manager&&) = delete;
~wallet_manager() = default;
~wallet_manager();
/// Set the path for location of wallet files.
/// @param p path to override default ./ location of wallet files.
void set_dir(const boost::filesystem::path& p) { dir = p; }
void set_dir(const boost::filesystem::path& p) {
dir = p;
initialize_lock();
}
/// Set the timeout for locking all wallets.
/// If set then after t seconds of inactivity then lock_all().
......@@ -135,6 +139,10 @@ private:
std::chrono::seconds timeout = std::chrono::seconds::max(); ///< how long to wait before calling lock_all()
mutable timepoint_t timeout_time = timepoint_t::max(); ///< when to call lock_all()
boost::filesystem::path dir = ".";
boost::filesystem::path lock_path = dir / "wallet.lock";
std::unique_ptr<boost::interprocess::file_lock> wallet_dir_lock;
void initialize_lock();
};
} // namespace wallet
......
......@@ -33,6 +33,12 @@ wallet_manager::wallet_manager() {
#endif
}
wallet_manager::~wallet_manager() {
//not strictly required, but a leftover lock file may spook users
if(wallet_dir_lock)
boost::filesystem::remove(lock_path);
}
void wallet_manager::set_timeout(const std::chrono::seconds& t) {
timeout = t;
auto now = std::chrono::system_clock::now();
......@@ -269,5 +275,20 @@ void wallet_manager::own_and_use_wallet(const string& name, std::unique_ptr<wall
wallets.emplace(name, std::move(wallet));
}
void wallet_manager::initialize_lock() {
//This is technically somewhat racy in here -- if multiple keosd are in this function at once.
//I've considered that an acceptable tradeoff to maintain cross-platform boost constructs here
lock_path = dir / "wallet.lock";
{
std::ofstream x(lock_path.string());
EOS_ASSERT(!x.fail(), wallet_exception, "Failed to open wallet lock file at ${f}", ("f", lock_path.string()));
}
wallet_dir_lock = std::make_unique<boost::interprocess::file_lock>(lock_path.string().c_str());
if(!wallet_dir_lock->try_lock()) {
wallet_dir_lock.reset();
EOS_THROW(wallet_exception, "Failed to lock access to wallet directory; is another keosd running?");
}
}
} // namespace wallet
} // namespace eosio
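The lock added above is a standard single-instance guard: touch a lock file in the wallet directory, take an exclusive advisory lock on it, and bail out if another keosd already holds it. A minimal standalone sketch of the same pattern, with error handling reduced to a plain exception:
```cpp
#include <boost/filesystem.hpp>
#include <boost/interprocess/sync/file_lock.hpp>
#include <fstream>
#include <memory>
#include <stdexcept>

std::unique_ptr<boost::interprocess::file_lock>
acquire_dir_lock(const boost::filesystem::path& dir) {
    auto lock_path = dir / "wallet.lock";
    {
        std::ofstream touch(lock_path.string());   // ensure the lock file exists
        if (touch.fail())
            throw std::runtime_error("failed to open " + lock_path.string());
    }
    auto lock = std::make_unique<boost::interprocess::file_lock>(lock_path.string().c_str());
    if (!lock->try_lock())
        throw std::runtime_error("another process holds " + lock_path.string());
    return lock;   // keep this object alive for the lifetime of the process
}
```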
......@@ -619,7 +619,7 @@ launcher_def::initialize (const variables_map &vmap) {
stage = bfs::path(erd);
if (!bfs::exists(stage)) {
cerr << erd << " is not a valid path" << endl;
cerr << "\"" << erd << "\" is not a valid path. Please ensure environment variable EOSIO_HOME is set to the build path." << endl;
exit (-1);
}
stage /= bfs::path("staging");
......
......@@ -15,10 +15,12 @@ include_directories("${CMAKE_SOURCE_DIR}/plugins/wallet_plugin/include")
file(GLOB UNIT_TESTS "*.cpp")
add_executable( plugin_test ${UNIT_TESTS} ${WASM_UNIT_TESTS} main.cpp)
add_executable( plugin_test ${UNIT_TESTS} ${WASM_UNIT_TESTS} )
target_link_libraries( plugin_test eosio_testing eosio_chain chainbase eos_utilities chain_plugin wallet_plugin abi_generator fc ${PLATFORM_SPECIFIC_LIBS} )
target_include_directories( plugin_test PUBLIC ${CMAKE_SOURCE_DIR}/plugins/net_plugin/include )
target_include_directories( plugin_test PUBLIC
${CMAKE_SOURCE_DIR}/plugins/net_plugin/include
${CMAKE_SOURCE_DIR}/plugins/chain_plugin/include )
add_dependencies(plugin_test asserter test_api test_api_mem test_api_db test_api_multi_index proxy identity identity_test stltest infinite eosio.system eosio.token eosio.bios test.inline multi_index_test noop dice eosio.msig)
#
......
......@@ -130,9 +130,13 @@ class Cluster(object):
assert(isinstance(totalProducers, (str,int)))
producerFlag="--producers %s" % (totalProducers)
if not Cluster.arePortsAvailable(set(range(self.port, self.port+totalNodes+1))):
Utils.Print("ERROR: Another process is listening on nodeos default port.")
return False
tries = 30
while not Cluster.arePortsAvailable(set(range(self.port, self.port+totalNodes+1))):
Utils.Print("ERROR: Another process is listening on nodeos default port. wait...")
if tries == 0:
return False
tries = tries - 1
time.sleep(2)
cmd="%s -p %s -n %s -d %s -i %s -f --p2p-plugin %s %s" % (
Utils.EosLauncherPath, pnodes, totalNodes, delay, datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3],
......@@ -292,7 +296,7 @@ class Cluster(object):
s=" ".join(cmdArr)
if Utils.Debug: Utils.Print("cmd: %s" % (s))
if 0 != subprocess.call(cmdArr):
Utils.Print("ERROR: Launcher failed to launch.")
Utils.Print("ERROR: Launcher failed to launch. failed cmd: %s" % (s))
return False
self.nodes=list(range(totalNodes)) # placeholder for cleanup purposes only
......
#include <boost/test/unit_test.hpp>
#include <boost/algorithm/string/predicate.hpp>
#include <eosio/testing/tester.hpp>
#include <eosio/chain/abi_serializer.hpp>
#include <eosio/chain/wasm_eosio_constraints.hpp>
#include <eosio/chain/resource_limits.hpp>
#include <eosio/chain/exceptions.hpp>
#include <eosio/chain/wast_to_wasm.hpp>
#include <eosio/chain_plugin/chain_plugin.hpp>
#include <asserter/asserter.wast.hpp>
#include <asserter/asserter.abi.hpp>
#include <stltest/stltest.wast.hpp>
#include <stltest/stltest.abi.hpp>
#include <noop/noop.wast.hpp>
#include <noop/noop.abi.hpp>
#include <eosio.system/eosio.system.wast.hpp>
#include <eosio.system/eosio.system.abi.hpp>
#include <fc/io/fstream.hpp>
#include <Runtime/Runtime.h>
#include <fc/variant_object.hpp>
#include <fc/io/json.hpp>
#include <array>
#include <utility>
#ifdef NON_VALIDATING_TEST
#define TESTER tester
#else
#define TESTER validating_tester
#endif
using namespace eosio;
using namespace eosio::chain;
using namespace eosio::testing;
using namespace fc;
BOOST_AUTO_TEST_SUITE(chain_plugin_tests)
BOOST_FIXTURE_TEST_CASE( get_block_with_invalid_abi, TESTER ) try {
produce_blocks(2);
create_accounts( {N(asserter)} );
produce_block();
// setup contract and abi
set_code(N(asserter), asserter_wast);
set_abi(N(asserter), asserter_abi);
produce_blocks(1);
auto resolver = [&,this]( const account_name& name ) -> optional<abi_serializer> {
try {
const auto& accnt = this->control->db().get<account_object,by_name>( name );
abi_def abi;
if (abi_serializer::to_abi(accnt.abi, abi)) {
return abi_serializer(abi, abi_serializer_max_time);
}
return optional<abi_serializer>();
} FC_RETHROW_EXCEPTIONS(error, "resolver failed at chain_plugin_tests::abi_invalid_type");
};
// abi should be resolved
BOOST_REQUIRE_EQUAL(true, resolver(N(asserter)).valid());
// make an action using the valid contract & abi
variant pretty_trx = mutable_variant_object()
("actions", variants({
mutable_variant_object()
("account", "asserter")
("name", "procassert")
("authorization", variants({
mutable_variant_object()
("actor", "asserter")
("permission", name(config::active_name).to_string())
}))
("data", mutable_variant_object()
("condition", 1)
("message", "Should Not Assert!")
)
})
);
signed_transaction trx;
abi_serializer::from_variant(pretty_trx, trx, resolver, abi_serializer_max_time);
set_transaction_headers(trx);
trx.sign( get_private_key( N(asserter), "active" ), control->get_chain_id() );
push_transaction( trx );
produce_blocks(1);
// retrieve block num
uint32_t headnum = this->control->head_block_num();
char headnumstr[20];
sprintf(headnumstr, "%d", headnum);
chain_apis::read_only::get_block_params param{headnumstr};
chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX));
// block should be decoded successfully
std::string block_str = json::to_pretty_string(plugin.get_block(param));
BOOST_TEST(block_str.find("procassert") != std::string::npos);
BOOST_TEST(block_str.find("condition") != std::string::npos);
BOOST_TEST(block_str.find("Should Not Assert!") != std::string::npos);
BOOST_TEST(block_str.find("011253686f756c64204e6f742041737365727421") != std::string::npos); //action data
// set an invalid abi (int8->xxxx)
std::string abi2 = asserter_abi;
auto pos = abi2.find("int8");
BOOST_TEST(pos != std::string::npos);
abi2.replace(pos, 4, "xxxx");
set_abi(N(asserter), abi2.c_str());
produce_blocks(1);
// resolving the invalid abi results in an exception
BOOST_CHECK_THROW(resolver(N(asserter)), invalid_type_inside_abi);
// getting the same block as a string results in a decode failure (invalid abi) but no exception
std::string block_str2 = json::to_pretty_string(plugin.get_block(param));
BOOST_TEST(block_str2.find("procassert") != std::string::npos);
BOOST_TEST(block_str2.find("condition") == std::string::npos); // decode failed
BOOST_TEST(block_str2.find("Should Not Assert!") == std::string::npos); // decode failed
BOOST_TEST(block_str2.find("011253686f756c64204e6f742041737365727421") != std::string::npos); //action data
} FC_LOG_AND_RETHROW() /// get_block_with_invalid_abi
BOOST_AUTO_TEST_SUITE_END()