Commit c5b0ec21 authored by Todd Fleming

Merge remote-tracking branch 'origin/master' into bios-boot-tutorial-more

......@@ -8,7 +8,7 @@ RUN echo 'APT::Install-Recommends 0;' >> /etc/apt/apt.conf.d/01norecommends \
&& apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install -y sudo wget curl net-tools ca-certificates unzip gnupg
RUN echo "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-4.0 main" >> /etc/apt/sources.list.d/llvm.list \
RUN echo "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic main" >> /etc/apt/sources.list.d/llvm.list \
&& wget -O - http://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add - \
&& apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install -y git-core automake autoconf libtool build-essential pkg-config libtool \
......
......@@ -26,9 +26,12 @@ namespace eosio {
void setprods( std::vector<eosio::producer_key> schedule ) {
(void)schedule; // schedule argument just forces the deserialization of the action data into vector<producer_key> (necessary check)
require_auth( _self );
char buffer[action_data_size()];
read_action_data( buffer, sizeof(buffer) ); // should be the same data as eosio::pack(schedule)
set_proposed_producers(buffer, sizeof(buffer));
constexpr size_t max_stack_buffer_size = 512;
size_t size = action_data_size();
char* buffer = (char*)( max_stack_buffer_size < size ? malloc(size) : alloca(size) );
read_action_data( buffer, size );
set_proposed_producers(buffer, size);
}
void reqauth( action_name from ) {
......
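The hunk above replaces a variable-length stack array with a bounded stack buffer that falls back to the heap. A minimal standalone sketch of the same pattern follows; the helper names, the read_payload stand-in, and the explicit free are illustrative additions and not part of this commit (the contract code above never frees, which is harmless because the contract's memory is discarded once the action finishes).

#include <alloca.h>
#include <cstddef>
#include <cstdlib>
#include <cstring>

constexpr std::size_t max_stack_buffer_size = 512;

// hypothetical stand-in for read_action_data(): fills `buffer` with `size` bytes
void read_payload( char* buffer, std::size_t size ) { std::memset( buffer, 0, size ); }

void handle_action( std::size_t size ) {
   bool use_heap = max_stack_buffer_size < size;
   // small payloads live on the stack via alloca; large ones go to the heap so an
   // oversized action cannot overflow the fixed-size stack the old VLA lived on
   char* buffer = static_cast<char*>( use_heap ? std::malloc( size ) : alloca( size ) );
   read_payload( buffer, size );
   // ... consume buffer ...
   if( use_heap ) std::free( buffer );
}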
......@@ -122,8 +122,8 @@ namespace eosiosystem {
int64_t bytes_out;
auto itr = _rammarket.find(S(4,RAMCORE));
_rammarket.modify( itr, 0, [&]( auto& es ) {
const auto& market = _rammarket.get(S(4,RAMCORE), "ram market does not exist");
_rammarket.modify( market, 0, [&]( auto& es ) {
bytes_out = es.convert( quant_after_fee, S(0,RAM) ).amount;
});
......
......@@ -44,9 +44,11 @@ namespace eosio {
*/
template<typename T>
T unpack_action_data() {
char buffer[action_data_size()];
read_action_data( buffer, sizeof(buffer) );
return unpack<T>( buffer, sizeof(buffer) );
constexpr size_t max_stack_buffer_size = 512;
size_t size = action_data_size();
char* buffer = (char*)( max_stack_buffer_size < size ? malloc(size) : alloca(size) );
read_action_data( buffer, size );
return unpack<T>( buffer, size );
}
using ::require_auth;
......
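For context on how this function is consumed, here is a hedged sketch of a contract entry point that reads its arguments through unpack_action_data; the struct, field, account, and header names are assumptions for illustration, not taken from this commit.

#include <eosiolib/action.hpp>
#include <eosiolib/asset.hpp>
#include <eosiolib/serialize.hpp>
#include <string>

struct transfer_args {
   account_name from;
   account_name to;
   eosio::asset quantity;
   std::string  memo;

   EOSLIB_SERIALIZE( transfer_args, (from)(to)(quantity)(memo) )
};

extern "C" void apply( uint64_t /*receiver*/, uint64_t /*code*/, uint64_t action ) {
   if( action == N(transfer) ) {
      // deserializes the full action payload, whatever its size
      auto args = eosio::unpack_action_data<transfer_args>();
      eosio::require_auth( args.from );
      // ... act on args.to, args.quantity and args.memo ...
   }
}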
......@@ -319,14 +319,14 @@ class multi_index
return lb;
}
const T& get( secondary_key_type&& secondary )const {
return get( secondary );
const T& get( secondary_key_type&& secondary, const char* error_msg = "unable to find secondary key" )const {
return get( secondary, error_msg );
}
// Gets the object with the smallest primary key in the case where the secondary key is not unique.
const T& get( const secondary_key_type& secondary )const {
const T& get( const secondary_key_type& secondary, const char* error_msg = "unable to find secondary key" )const {
auto result = find( secondary );
eosio_assert( result != cend(), "unable to find secondary key" );
eosio_assert( result != cend(), error_msg );
return *result;
}
......@@ -751,7 +751,7 @@ class multi_index
});
}
const T& get( uint64_t primary, const char* error_msg = "unable to find key" )const {
const T& get( uint64_t primary, const char* error_msg = "unable to find key" )const {
auto result = find( primary );
eosio_assert( result != cend(), error_msg );
return *result;
......
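Together with the rammarket hunk earlier, these overloads let the caller supply the assertion text instead of the generic "unable to find key". A hedged usage sketch follows; the table, index, and field names are illustrative, not from this commit.

#include <eosiolib/multi_index.hpp>

struct account_row {
   uint64_t id;
   uint64_t owner;

   uint64_t primary_key() const { return id; }
   uint64_t by_owner()    const { return owner; }

   EOSLIB_SERIALIZE( account_row, (id)(owner) )
};

typedef eosio::multi_index< N(accounts), account_row,
   eosio::indexed_by< N(byowner),
      eosio::const_mem_fun< account_row, uint64_t, &account_row::by_owner > >
> accounts_table;

void lookup( uint64_t code, uint64_t scope, uint64_t owner ) {
   accounts_table accounts( code, scope );
   // primary-key get(): the second argument becomes the eosio_assert message
   const auto& by_id = accounts.get( 1, "account row 1 does not exist" );
   // the secondary-index get() now accepts the same optional message
   auto idx = accounts.get_index< N(byowner) >();
   const auto& row = idx.get( owner, "no account row for this owner" );
   (void)by_id; (void)row;
}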
......@@ -76,12 +76,14 @@ namespace eosio {
* @return the indicated action
*/
inline action get_action( uint32_t type, uint32_t index ) {
auto size = ::get_action(type, index, nullptr, 0);
eosio_assert( size > 0, "get_action size failed" );
char buf[size];
auto size2 = ::get_action(type, index, &buf[0], static_cast<size_t>(size) );
eosio_assert( size == size2, "get_action failed" );
return eosio::unpack<eosio::action>(&buf[0], static_cast<size_t>(size));
constexpr size_t max_stack_buffer_size = 512;
int s = ::get_action( type, index, nullptr, 0 );
eosio_assert( s > 0, "get_action size failed" );
size_t size = static_cast<size_t>(s);
char* buffer = (char*)( max_stack_buffer_size < size ? malloc(size) : alloca(size) );
auto size2 = ::get_action( type, index, buffer, size );
eosio_assert( size == static_cast<size_t>(size2), "get_action failed" );
return eosio::unpack<eosio::action>( buffer, size );
}
///@} transactioncpp api
......
......@@ -871,13 +871,14 @@ struct controller_impl {
if (except) {
elog("exception thrown while switching forks ${e}", ("e",except->to_detail_string()));
while (ritr != branches.first.rend() ) {
fork_db.set_validity( *ritr, false );
++ritr;
}
// ritr currently points to the block that threw
// if we mark it invalid it will automatically remove all forks built off it.
fork_db.set_validity( *ritr, false );
// pop all blocks from the bad fork
for( auto itr = (ritr + 1).base(); itr != branches.second.end(); ++itr ) {
// ritr base is a forward itr to the last block successfully applied
auto applied_itr = ritr.base();
for( auto itr = applied_itr; itr != branches.first.end(); ++itr ) {
fork_db.mark_in_current_chain( *itr , false );
pop_block();
}
......
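The correctness of the new loop hinges on how reverse_iterator::base() maps back to a forward iterator. A small self-contained illustration, with plain ints standing in for blocks:

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
   // newest-first, like branches.first: 10 sits at the fork point side, 40 at the head
   std::vector<int> blocks{ 40, 30, 20, 10 };
   // reverse iteration applies 10, then 20, then pretend 30 throws
   auto ritr = std::find( blocks.rbegin(), blocks.rend(), 30 );
   assert( *ritr == 30 );           // ritr points at the block that threw
   assert( *ritr.base() == 20 );    // base() is a forward iterator to the newest
                                    // block that was still applied successfully
   // walking from ritr.base() to blocks.end() visits 20 then 10 -- exactly the
   // applied blocks that have to be popped off the bad fork
   return 0;
}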
......@@ -209,14 +209,14 @@ namespace eosio { namespace chain {
my->index.erase(itr);
auto& previdx = my->index.get<by_prev>();
auto previtr = previdx.find(id);
while( previtr != previdx.end() ) {
auto previtr = previdx.lower_bound(remove_queue[i]);
while( previtr != previdx.end() && (*previtr)->header.previous == remove_queue[i] ) {
remove_queue.push_back( (*previtr)->id );
previdx.erase(previtr);
previtr = previdx.find(id);
++previtr;
}
}
//wdump((my->index.size()));
my->head = *my->index.get<by_lib_block_num>().begin();
}
void fork_database::set_validity( const block_state_ptr& h, bool valid ) {
......
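The new removal loop relies on the by_prev index being ordered and non-unique, so all children of a block form a contiguous range starting at lower_bound(parent). A self-contained sketch of that breadth-first walk, with a std::multimap standing in for the boost multi_index by_prev index and integers standing in for block ids:

#include <cstddef>
#include <cstdint>
#include <map>
#include <vector>

int main() {
   // children keyed by their parent ("previous") block id
   std::multimap<std::uint64_t, std::uint64_t> by_prev{ {1,2}, {1,3}, {2,4}, {3,5} };

   std::vector<std::uint64_t> remove_queue{ 1 };
   for( std::size_t i = 0; i < remove_queue.size(); ++i ) {
      // every child of remove_queue[i] sits in one contiguous run
      for( auto itr = by_prev.lower_bound( remove_queue[i] );
           itr != by_prev.end() && itr->first == remove_queue[i]; ++itr ) {
         remove_queue.push_back( itr->second );
      }
   }
   // remove_queue now holds 1, 2, 3, 4, 5: the block and all of its descendants
   return 0;
}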
......@@ -98,7 +98,12 @@ class incremental_merkle_impl {
:_node_count(0)
{}
template<typename Allocator>
incremental_merkle_impl( const incremental_merkle_impl& ) = default;
incremental_merkle_impl( incremental_merkle_impl&& ) = default;
incremental_merkle_impl& operator= (const incremental_merkle_impl& ) = default;
incremental_merkle_impl& operator= ( incremental_merkle_impl&& ) = default;
template<typename Allocator, std::enable_if_t<!std::is_same<std::decay_t<Allocator>, incremental_merkle_impl>::value, int> = 0>
incremental_merkle_impl( Allocator&& alloc ):_active_nodes(forward<Allocator>(alloc)){}
/*
......
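The enable_if on the new constructor is what keeps the defaulted copy and move constructors reachable: an unconstrained forwarding constructor is an exact match for a non-const lvalue and would win overload resolution. A minimal self-contained sketch of the same constraint, with illustrative class and member names:

#include <memory>
#include <type_traits>
#include <utility>
#include <vector>

struct merkle_like {
   std::vector<int> nodes;

   merkle_like() = default;
   merkle_like( const merkle_like& ) = default;
   merkle_like( merkle_like&& ) = default;
   merkle_like& operator=( const merkle_like& ) = default;
   merkle_like& operator=( merkle_like&& ) = default;

   // participates only when Alloc is NOT merkle_like, so copying never falls into it
   template< typename Alloc,
             std::enable_if_t< !std::is_same< std::decay_t<Alloc>, merkle_like >::value, int > = 0 >
   merkle_like( Alloc&& alloc ) : nodes( std::forward<Alloc>( alloc ) ) {}
};

int main() {
   merkle_like a( std::allocator<int>{} );  // forwarding constructor
   merkle_like b( a );                      // copy constructor, not the template
   merkle_like c( std::move( a ) );         // move constructor
   (void)b; (void)c;
   return 0;
}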
......@@ -114,6 +114,7 @@ struct interpreter_interface : ModuleInstance::ExternalInterface {
}
void growMemory(Address old_size, Address new_size) override {
memset(memory.data + old_size.addr, 0, new_size.addr - old_size.addr);
current_memory_size += new_size.addr - old_size.addr;
}
......
......@@ -114,6 +114,7 @@ namespace Runtime
{
return -1;
}
memset(memory->baseAddress + (memory->numPages << IR::numBytesPerPageLog2), 0, numNewPages << IR::numBytesPerPageLog2);
memory->numPages += numNewPages;
}
return previousNumPages;
......
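Both runtime hunks enforce the same invariant: memory handed out by grow_memory must read as zero, so stale bytes from a previous execution can never leak into a contract. A hedged standalone sketch of that invariant; the realloc-backed storage and the page constant are illustrative, not how the real runtimes reserve memory.

#include <cstddef>
#include <cstdlib>
#include <cstring>

constexpr std::size_t page_size = 65536;  // one WebAssembly page

struct linear_memory {
   unsigned char* base  = nullptr;
   std::size_t    pages = 0;

   // returns the previous page count, like the wasm grow_memory operator
   // (allocation-failure handling omitted for brevity)
   std::size_t grow( std::size_t new_pages ) {
      std::size_t prev = pages;
      base = static_cast<unsigned char*>(
         std::realloc( base, ( pages + new_pages ) * page_size ) );
      // zero the freshly added region so the contract never observes stale data
      std::memset( base + pages * page_size, 0, new_pages * page_size );
      pages += new_pages;
      return prev;
   }
};

int main() {
   linear_memory mem;
   mem.grow( 1 );
   return mem.base[ page_size - 1 ];  // reads 0: newly grown memory is zeroed
}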
......@@ -563,3 +563,40 @@ static const char import_injected_wast[] =
" (import \"" EOSIO_INJECTED_MODULE_NAME "\" \"checktime\" (func $inj (param i32)))" \
" (func $apply (param $0 i64) (param $1 i64) (param $2 i64))" \
")";
static const char memory_growth_memset_store[] = R"=====(
(module
(export "apply" (func $apply))
(memory $0 1)
(func $apply (param $0 i64)(param $1 i64)(param $2 i64)
(drop (grow_memory (i32.const 2)))
(i32.store (i32.const 80000) (i32.const 2))
(i32.store (i32.const 140000) (i32.const 3))
)
)
)=====";
static const char memory_growth_memset_test[] = R"=====(
(module
(export "apply" (func $apply))
(import "env" "eosio_assert" (func $eosio_assert (param i32 i32)))
(memory $0 1)
(func $apply (param $0 i64)(param $1 i64)(param $2 i64)
(drop (grow_memory (i32.const 2)))
(call $eosio_assert
(i32.eq
(i32.load offset=80000 (i32.const 0))
(i32.const 0)
)
(i32.const 0)
)
(call $eosio_assert
(i32.eq
(i32.load offset=140000 (i32.const 0))
(i32.const 0)
)
(i32.const 0)
)
)
)
)=====";
\ No newline at end of file
......@@ -38,6 +38,105 @@ BOOST_AUTO_TEST_CASE( irrblock ) try {
} FC_LOG_AND_RETHROW()
struct fork_tracker {
vector<signed_block_ptr> blocks;
incremental_merkle block_merkle;
};
BOOST_AUTO_TEST_CASE( fork_with_bad_block ) try {
tester bios;
bios.produce_block();
bios.produce_block();
bios.create_accounts( {N(a),N(b),N(c),N(d),N(e)} );
bios.produce_block();
auto res = bios.set_producers( {N(a),N(b),N(c),N(d),N(e)} );
// run until the producers are installed and it is the start of "a"'s round
while( bios.control->pending_block_state()->header.producer.to_string() != "a" || bios.control->head_block_state()->header.producer.to_string() != "e") {
bios.produce_block();
}
// sync remote node
tester remote;
while( remote.control->head_block_num() < bios.control->head_block_num() ) {
auto fb = bios.control->fetch_block_by_number( remote.control->head_block_num()+1 );
remote.push_block( fb );
}
// produce 6 blocks on bios
for (int i = 0; i < 6; i ++) {
bios.produce_block();
BOOST_REQUIRE_EQUAL( bios.control->head_block_state()->header.producer.to_string(), "a" );
}
vector<fork_tracker> forks(7);
// enough to skip A's blocks
auto offset = fc::milliseconds(config::block_interval_ms * 13);
// skip a's blocks on remote
// create 7 forks of 7 blocks each (longer than the honest chain), where the ith block of fork i is corrupted
for (size_t i = 0; i < 7; i ++) {
auto b = remote.produce_block(offset);
BOOST_REQUIRE_EQUAL( b->producer.to_string(), "b" );
for (size_t j = 0; j < 7; j ++) {
auto& fork = forks.at(j);
if (j <= i) {
auto copy_b = std::make_shared<signed_block>(*b);
if (j == i) {
// corrupt this block
fork.block_merkle = remote.control->head_block_state()->blockroot_merkle;
copy_b->action_mroot._hash[0] ^= 0x1ULL;
} else if (j < i) {
// link to a corrupted chain
copy_b->previous = fork.blocks.back()->id();
}
// re-sign the block
auto header_bmroot = digest_type::hash( std::make_pair( copy_b->digest(), fork.block_merkle.get_root() ) );
auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state()->pending_schedule_hash) );
copy_b->producer_signature = remote.get_private_key(N(b), "active").sign(sig_digest);
// add this new block to our corrupted block merkle
fork.block_merkle.append(copy_b->id());
fork.blocks.emplace_back(copy_b);
} else {
fork.blocks.emplace_back(b);
}
}
offset = fc::milliseconds(config::block_interval_ms);
}
// go from most corrupted fork to least
for (size_t i = 0; i < forks.size(); i++) {
BOOST_TEST_CONTEXT("Testing Fork: " << i) {
const auto& fork = forks.at(i);
// push the fork to the original node
for (int fidx = 0; fidx < fork.blocks.size() - 1; fidx++) {
const auto& b = fork.blocks.at(fidx);
// push the block only if it is not already known
if (!bios.control->fetch_block_by_id(b->id())) {
bios.push_block(b);
}
}
// push the block which should attempt the corrupted fork and fail
BOOST_REQUIRE_THROW(bios.push_block(fork.blocks.back()), fc::exception);
}
}
// make sure we can still produce blocks until irreversibility moves
auto lib = bios.control->head_block_state()->dpos_irreversible_blocknum;
size_t tries = 0;
while (bios.control->head_block_state()->dpos_irreversible_blocknum == lib && ++tries < 10000) {
bios.produce_block();
}
} FC_LOG_AND_RETHROW();
BOOST_AUTO_TEST_CASE( forking ) try {
tester c;
c.produce_block();
......
......@@ -1558,6 +1558,37 @@ BOOST_FIXTURE_TEST_CASE( protect_injected, TESTER ) try {
produce_blocks(1);
} FC_LOG_AND_RETHROW()
BOOST_FIXTURE_TEST_CASE( mem_growth_memset, TESTER ) try {
produce_blocks(2);
create_accounts( {N(grower)} );
produce_block();
action act;
act.account = N(grower);
act.name = N();
act.authorization = vector<permission_level>{{N(grower),config::active_name}};
set_code(N(grower), memory_growth_memset_store);
{
signed_transaction trx;
trx.actions.push_back(act);
set_transaction_headers(trx);
trx.sign(get_private_key( N(grower), "active" ), control->get_chain_id());
push_transaction(trx);
}
produce_blocks(1);
set_code(N(grower), memory_growth_memset_test);
{
signed_transaction trx;
trx.actions.push_back(act);
set_transaction_headers(trx);
trx.sign(get_private_key( N(grower), "active" ), control->get_chain_id());
push_transaction(trx);
}
} FC_LOG_AND_RETHROW()
INCBIN(fuzz1, "fuzz1.wasm");
INCBIN(fuzz2, "fuzz2.wasm");
INCBIN(fuzz3, "fuzz3.wasm");
......