Commit b57e6831 authored by Daniel Larimer

Merge branch 'slim' of github.com:EOSIO/eos into slim

......@@ -57,7 +57,7 @@ tests/chain_test
tests/intense_test
tests/performance_test
tests/tests/config.hpp
unittests/config.hpp
doxygen
wallet.json
......
Subproject commit 3bfd8b5fdf8df9774f116fb2014770053df73a35
Subproject commit 7f2b9581b0de10bc94917ef6a1f0d5311284a4b5
......@@ -74,8 +74,7 @@ namespace eosio { namespace chain {
result.block_signing_key = prokey.block_signing_key;
result.header.producer = prokey.producer_name;
result.prior_pending_schedule_hash = pending_schedule_hash;
result.pending_schedule_lib_num = pending_schedule_lib_num;
result.pending_schedule_hash = pending_schedule_hash;
result.block_num = block_num + 1;
result.producer_to_last_produced = producer_to_last_produced;
......
......@@ -181,10 +181,6 @@ struct controller_impl {
resource_limits.add_indices();
}
void abort_pending_block() {
pending.reset();
}
void clear_all_undo() {
// Rewind the database to the last irreversible block
db.with_write_lock([&] {
......@@ -402,7 +398,7 @@ struct controller_impl {
void push_unapplied_transaction( fc::time_point deadline ) {
auto itr = unapplied_transactions.begin();
if( itr == unapplied_transactions.end() )
return;
push_transaction( itr->second, deadline );
......@@ -414,8 +410,8 @@ struct controller_impl {
* determine whether to execute it now or to delay it. Lastly it inserts a transaction receipt into
* the pending block.
*/
void push_transaction( const transaction_metadata_ptr& trx,
fc::time_point deadline = fc::time_point::maximum(),
bool implicit = false ) {
if( deadline == fc::time_point() ) {
unapplied_transactions[trx->id] = trx;
......@@ -464,21 +460,40 @@ struct controller_impl {
void start_block( block_timestamp_type when ) {
FC_ASSERT( !pending );
FC_ASSERT( db.revision() == head->block_num, "",
("db_head_block", db.revision())("controller_head_block", head->block_num)("fork_db_head_block", fork_db.head()->block_num) );
pending = db.start_undo_session(true);
pending->_pending_block_state = std::make_shared<block_state>( *head, when ); // promotes pending schedule (if any) to active
pending->_pending_block_state->in_current_chain = true;
const auto& gpo = db.get<global_property_object>();
if( gpo.proposed_schedule_block_num.valid() && // if there is a proposed schedule that was proposed in a block ...
( *gpo.proposed_schedule_block_num <= pending->_pending_block_state->dpos_last_irreversible_blocknum ) && // ... that has now become irreversible ...
pending->_pending_block_state->pending_schedule.producers.size() == 0 && // ... and there is room for a new pending schedule ...
head->pending_schedule.producers.size() == 0 // ... and not just because it was promoted to active at the start of this block, then:
)
{
// Promote proposed schedule to pending schedule.
ilog( "promoting proposed schedule (set in block ${proposed_num}) to pending; current block: ${n} lib: ${lib} schedule: ${schedule} ",
("proposed_num", *gpo.proposed_schedule_block_num)("n", pending->_pending_block_state->block_num)
("lib", pending->_pending_block_state->dpos_last_irreversible_blocknum)
("schedule", static_cast<producer_schedule_type>(gpo.proposed_schedule) ) );
pending->_pending_block_state->set_new_producers( gpo.proposed_schedule );
db.modify( gpo, [&]( auto& gp ) {
gp.proposed_schedule_block_num = optional<block_num_type>();
gp.proposed_schedule.clear();
});
}
try {
auto onbtrx = std::make_shared<transaction_metadata>( get_on_block_transaction() );
push_transaction( onbtrx, fc::time_point::maximum(), true );
} catch ( ... ) {
ilog( "on block transaction failed, but shouldn't impact block generation, system contract needs update" );
}
} // start_block
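The four-part condition added to start_block() is easier to see in isolation. Below is a minimal, self-contained sketch (simplified, hypothetical types, not the real chain objects) of the same promotion rule: a proposed schedule becomes pending only once the block that proposed it has become irreversible and no other pending schedule is in flight.

// Standalone sketch of the promotion check in controller_impl::start_block().
// All types here are simplified stand-ins, not the actual EOSIO classes.
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

struct producer_schedule { uint32_t version = 0; std::vector<int> producers; };

struct pending_block_state {                       // stand-in for block_state
    uint32_t dpos_last_irreversible_blocknum = 0;
    producer_schedule pending_schedule;            // schedule waiting to become active
};

struct global_props {                              // stand-in for global_property_object
    std::optional<uint32_t> proposed_schedule_block_num;
    producer_schedule proposed_schedule;
};

// Mirrors the condition added in start_block().
bool should_promote_proposed_to_pending( const global_props& gpo,
                                         const pending_block_state& pbs,
                                         const pending_block_state& head ) {
    return gpo.proposed_schedule_block_num.has_value()                              // proposed in some block ...
        && *gpo.proposed_schedule_block_num <= pbs.dpos_last_irreversible_blocknum  // ... that is now irreversible ...
        && pbs.pending_schedule.producers.empty()                                   // ... no pending schedule in flight ...
        && head.pending_schedule.producers.empty();                                 // ... and none was just promoted to active
}

int main() {
    global_props gpo;  gpo.proposed_schedule_block_num = 10;
    pending_block_state pbs;  pbs.dpos_last_irreversible_blocknum = 12;
    pending_block_state head;  // head carries no pending schedule
    std::cout << std::boolalpha << should_promote_proposed_to_pending( gpo, pbs, head ) << "\n"; // true
}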
......@@ -545,7 +560,7 @@ struct controller_impl {
throw;
}
} else if( new_head->id != head->id ) {
wlog("switching forks from ${current_head_id} (block number ${current_head_num}) to ${new_head_id} (block number ${new_head_num})",
ilog("switching forks from ${current_head_id} (block number ${current_head_num}) to ${new_head_id} (block number ${new_head_num})",
("current_head_id", head->id)("current_head_num", head->block_num)("new_head_id", new_head->id)("new_head_num", new_head->block_num) );
auto branches = fork_db.fetch_branch_from( new_head->id, head->id );
......@@ -565,7 +580,7 @@ struct controller_impl {
}
catch (const fc::exception& e) { except = e; }
if (except) {
wlog("exception thrown while switching forks ${e}", ("e",except->to_detail_string()));
elog("exception thrown while switching forks ${e}", ("e",except->to_detail_string()));
while (ritr != branches.first.rend() ) {
fork_db.set_validity( *ritr, false );
......@@ -589,7 +604,7 @@ struct controller_impl {
throw *except;
} // end if exception
} /// end for each block in branch
wlog("successfully switched fork to new head ${new_head_id}", ("new_head_id", new_head->id));
ilog("successfully switched fork to new head ${new_head_id}", ("new_head_id", new_head->id));
}
} /// push_block
......@@ -831,7 +846,7 @@ transaction_trace_ptr controller::sync_push( const transaction_metadata_ptr& trx
void controller::push_next_scheduled_transaction( fc::time_point deadline ) {
const auto& idx = db().get_index<generated_transaction_multi_index,by_delay>();
if( idx.begin() != idx.end() )
my->push_scheduled_transaction( *idx.begin(), deadline );
}
void controller::push_scheduled_transaction( const transaction_id_type& trxid, fc::time_point deadline ) {
......@@ -908,19 +923,41 @@ void controller::pop_block() {
my->pop_block();
}
void controller::set_active_producers( const producer_schedule_type& sch ) {
FC_ASSERT( !my->pending->_pending_block_state->header.new_producers, "this block has already set new producers" );
FC_ASSERT( !my->pending->_pending_block_state->pending_schedule.producers.size(), "there is already a pending schedule, wait for it to become active" );
my->pending->_pending_block_state->set_new_producers( sch );
void controller::set_proposed_producers( const producer_schedule_type& sch ) {
const auto& gpo = get_global_properties();
auto cur_block_num = head_block_num() + 1;
FC_ASSERT( !gpo.proposed_schedule_block_num.valid() || *gpo.proposed_schedule_block_num == cur_block_num,
"there is already a proposed schedule set in a previous block, wait for it to become pending" );
uint32_t next_proposed_schedule_version = 0;
if( my->pending->_pending_block_state->pending_schedule.producers.size() == 0 ) {
next_proposed_schedule_version = my->pending->_pending_block_state->active_schedule.version + 1;
} else {
next_proposed_schedule_version = my->pending->_pending_block_state->pending_schedule.version + 1;
}
FC_ASSERT( sch.version == next_proposed_schedule_version, "wrong producer schedule version specified" );
my->db.modify( gpo, [&]( auto& gp ) {
gp.proposed_schedule_block_num = cur_block_num;
gp.proposed_schedule = sch;
});
}
const producer_schedule_type& controller::active_producers()const {
return my->pending->_pending_block_state->active_schedule;
}
const producer_schedule_type& controller::pending_producers()const {
return my->pending->_pending_block_state->pending_schedule;
}
optional<producer_schedule_type> controller::proposed_producers()const {
const auto& gpo = get_global_properties();
if( !gpo.proposed_schedule_block_num.valid() )
return optional<producer_schedule_type>();
return gpo.proposed_schedule;
}
const apply_handler* controller::find_apply_handler( account_name receiver, account_name scope, action_name act ) const
{
auto native_handler_scope = my->apply_handlers.find( receiver );
......
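set_proposed_producers() accepts only a schedule whose version is exactly one greater than the schedule it will eventually replace: the pending schedule if one exists, otherwise the active one. A small sketch of that selection and check, using hypothetical simplified types:

// Sketch of the version rule enforced by controller::set_proposed_producers().
// Types and names are simplified stand-ins for illustration only.
#include <cstdint>
#include <stdexcept>
#include <vector>

struct schedule { uint32_t version = 0; std::vector<int> producers; };

uint32_t next_proposed_schedule_version( const schedule& active, const schedule& pending ) {
    // If a pending schedule exists the proposal follows it; otherwise it follows the active one.
    return pending.producers.empty() ? active.version + 1 : pending.version + 1;
}

void set_proposed( const schedule& active, const schedule& pending, const schedule& proposed ) {
    if ( proposed.version != next_proposed_schedule_version( active, pending ) )
        throw std::runtime_error( "wrong producer schedule version specified" );
    // ... record the proposal and the current block number in global state ...
}

int main() {
    schedule active{ 1, { 1, 2, 3 } };
    schedule pending;                       // empty: nothing pending
    schedule proposed{ 2, { 1, 2, 3, 4 } };
    set_proposed( active, pending, proposed );   // ok: version 2 == active.version + 1
}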
......@@ -14,7 +14,6 @@ struct block_header_state {
signed_block_header header;
uint32_t dpos_last_irreversible_blocknum = 0;
uint32_t pending_schedule_lib_num = 0; /// last irr block num
digest_type prior_pending_schedule_hash;
digest_type pending_schedule_hash;
producer_schedule_type pending_schedule;
producer_schedule_type active_schedule;
......
......@@ -71,7 +71,7 @@ namespace eosio { namespace chain {
/**
*
*
*/
void push_transaction( const transaction_metadata_ptr& trx = transaction_metadata_ptr(),
fc::time_point deadline = fc::time_point::maximum() );
......@@ -131,8 +131,9 @@ namespace eosio { namespace chain {
account_name head_block_producer()const;
const block_header& head_block_header()const;
const producer_schedule_type& active_producers()const;
const producer_schedule_type& pending_producers()const;
optional<producer_schedule_type> proposed_producers()const;
uint32_t last_irreversible_block_num() const;
......@@ -144,7 +145,7 @@ namespace eosio { namespace chain {
void validate_tapos( const transaction& t )const;
uint64_t validate_net_usage( const transaction_metadata_ptr& trx )const;
void set_active_producers( const producer_schedule_type& sch );
void set_proposed_producers( const producer_schedule_type& sch );
signal<void(const block_state_ptr&)> accepted_block_header;
signal<void(const block_state_ptr&)> accepted_block;
......
......@@ -16,14 +16,6 @@
namespace eosio { namespace chain {
struct blocknum_producer_schedule {
blocknum_producer_schedule( allocator<char> a )
:second(a){}
block_num_type first;
shared_producer_schedule_type second;
};
/**
* @class global_property_object
* @brief Maintains global state information (committee_member list, current fees)
......@@ -34,10 +26,12 @@ namespace eosio { namespace chain {
*/
class global_property_object : public chainbase::object<global_property_object_type, global_property_object>
{
OBJECT_CTOR(global_property_object)
OBJECT_CTOR(global_property_object, (proposed_schedule))
id_type id;
chain_config configuration;
id_type id;
optional<block_num_type> proposed_schedule_block_num;
shared_producer_schedule_type proposed_schedule;
chain_config configuration;
};
......@@ -56,7 +50,7 @@ namespace eosio { namespace chain {
OBJECT_CTOR(dynamic_global_property_object)
id_type id;
uint64_t global_action_sequence = 0;
};
using global_property_multi_index = chainbase::shared_multi_index_container<
......@@ -88,5 +82,5 @@ FC_REFLECT(eosio::chain::dynamic_global_property_object,
)
FC_REFLECT(eosio::chain::global_property_object,
(configuration)
(proposed_schedule_block_num)(proposed_schedule)(configuration)
)
......@@ -56,6 +56,11 @@ namespace eosio { namespace chain {
return result;
}
void clear() {
version = 0;
producers.clear();
}
uint32_t version = 0; ///< sequentially incrementing version number
shared_vector<producer_key> producers;
};
......
......@@ -150,7 +150,7 @@ class privileged_api : public context_aware_api {
unique_producers.insert(p.producer_name);
}
EOS_ASSERT(psch.producers.size() == unique_producers.size(), wasm_execution_error, "duplicate producer name in producer schedule");
context.mutable_controller.set_active_producers( psch );
context.mutable_controller.set_proposed_producers( psch );
}
uint32_t get_blockchain_parameters_packed( array_ptr<char> packed_blockchain_parameters, size_t datalen) {
......
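The privileged host function keeps its duplicate-producer guard and now simply forwards the schedule to set_proposed_producers() instead of set_active_producers(). A standalone sketch of that guard, with hypothetical simplified types:

// Sketch of the duplicate-producer check performed before a schedule proposal is accepted.
// producer_key/producer_schedule here are simplified stand-ins, not the real chain types.
#include <cstdint>
#include <iostream>
#include <set>
#include <stdexcept>
#include <string>
#include <vector>

struct producer_key { std::string producer_name; std::string block_signing_key; };
struct producer_schedule { uint32_t version = 0; std::vector<producer_key> producers; };

void validate_unique_producers( const producer_schedule& psch ) {
    std::set<std::string> unique_producers;
    for ( const auto& p : psch.producers )
        unique_producers.insert( p.producer_name );
    if ( unique_producers.size() != psch.producers.size() )
        throw std::runtime_error( "duplicate producer name in producer schedule" );
}

int main() {
    producer_schedule s{ 2, { { "dan", "EOS..." }, { "sam", "EOS..." }, { "dan", "EOS..." } } };
    try { validate_unique_producers( s ); }
    catch ( const std::exception& e ) { std::cout << e.what() << "\n"; }  // "dan" appears twice
}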
......@@ -16,9 +16,11 @@ int main( int argc, char** argv ) {
auto r = c.create_accounts( {N(dan),N(sam),N(pam)} );
wdump((fc::json::to_pretty_string(r)));
c.set_producers( {N(dan),N(sam),N(pam)}, 1 );
wlog("set producer schedule to [dan,sam,pam]");
c.produce_blocks(30);
c.create_accounts( {N(eosio.token)} );
auto r2 = c.create_accounts( {N(eosio.token)} );
wdump((fc::json::to_pretty_string(r2)));
c.set_code( N(eosio.token), eosio_token_wast );
c.set_abi( N(eosio.token), eosio_token_abi );
c.produce_blocks(10);
......@@ -65,7 +67,11 @@ int main( int argc, char** argv ) {
FC_ASSERT( b->producer == expected_producer,
"expected block ${n} to be produced by ${expected_producer} but was instead produced by ${actual_producer}",
("n", b->block_num())("expected_producer", expected_producer.to_string())("actual_producer", b->producer.to_string()) );
c.produce_blocks(11);
c.produce_blocks(10);
c.create_accounts( {N(cam)} );
c.set_producers( {N(dan),N(sam),N(pam),N(cam)}, 2 );
wlog("set producer schedule to [dan,sam,pam,cam]");
c.produce_block();
// The next block should be produced by pam.
// Sync second chain with first chain.
......@@ -107,6 +113,60 @@ int main( int argc, char** argv ) {
}
wlog( "end push c2 blocks to c1" );
wlog( "c1 blocks:" );
c.produce_blocks(24);
b = c.produce_block(); // Switching active schedule to version 2 happens in this block.
expected_producer = N(pam);
FC_ASSERT( b->producer == expected_producer,
"expected block ${n} to be produced by ${expected_producer} but was instead produced by ${actual_producer}",
("n", b->block_num())("expected_producer", expected_producer.to_string())("actual_producer", b->producer.to_string()) );
b = c.produce_block();
expected_producer = N(cam);
FC_ASSERT( b->producer == expected_producer,
"expected block ${n} to be produced by ${expected_producer} but was instead produced by ${actual_producer}",
("n", b->block_num())("expected_producer", expected_producer.to_string())("actual_producer", b->producer.to_string()) );
c.produce_blocks(10);
wlog( "push c1 blocks to c2" );
while( c2.control->head_block_num() < c.control->head_block_num() ) {
auto fb = c.control->fetch_block_by_number( c2.control->head_block_num()+1 );
c2.control->push_block( fb );
}
wlog( "end push c1 blocks to c2" );
// Now with four block producers active and two identical chains (for now),
// we can test out the case that would trigger the bug in the old fork db code:
fork_block_num = c.control->head_block_num();
wlog( "cam and dan go off on their own fork on c1 while sam and pam go off on their own fork on c2" );
wlog( "c1 blocks:" );
c.produce_blocks(12); // dan produces 12 blocks
c.produce_block( fc::milliseconds(config::block_interval_ms * 25) ); // cam skips over sam and pam's blocks
c.produce_blocks(23); // cam finishes the remaining 11 blocks then dan produces his 12 blocks
wlog( "c2 blocks:" );
c2.produce_block( fc::milliseconds(config::block_interval_ms * 25) ); // pam skips over dan and sam's blocks
c2.produce_blocks(11); // pam finishes the remaining 11 blocks
c2.produce_block( fc::milliseconds(config::block_interval_ms * 25) ); // sam skips over cam and dan's blocks
c2.produce_blocks(11); // sam finishes the remaining 11 blocks
wlog( "now cam and dan rejoin sam and pam on c2" );
c2.produce_block( fc::milliseconds(config::block_interval_ms * 13) ); // cam skips over pam's blocks (this block triggers a block on this branch to become irreversible)
c2.produce_blocks(11); // cam produces the remaining 11 blocks
b = c2.produce_block(); // dan produces a block
// a node on chain 1 now gets all but the last block from chain 2 which should cause a fork switch
wlog( "push c2 blocks (except for the last block by dan) to c1" );
for( uint32_t start = fork_block_num + 1, end = c2.control->head_block_num() - 1; start <= end; ++start ) {
auto fb = c2.control->fetch_block_by_number( start );
c.control->push_block( fb );
}
wlog( "end push c2 blocks to c1" );
wlog( "now push dan's block to c1 but first corrupt it so it is a bad block" );
auto bad_block = *b;
bad_block.producer = N(sam);
// bad_block.transaction_mroot = bad_block.action_mroot; // TODO/BUG: why did this not cause block rejection?
c.control->push_block( std::make_shared<signed_block>(bad_block) );
} FC_CAPTURE_AND_RETHROW()
} catch ( const fc::exception& e ) {
......