Commit a9343907 authored by Daniel Larimer

progress of refactor

Parent commit: 3489781d
......@@ -25,6 +25,14 @@ namespace eosio { namespace chain {
return active_schedule.producers[index];
}
/// True exactly when block_num is the first block of a scheduling round
/// (i.e. it falls on a round boundary).
bool block_header_state::is_start_of_round( uint32_t block_num )const {
   return (block_num % blocks_per_round()) == 0;
}
uint32_t block_header_state::blocks_per_round()const {
return active_producers.producers.size()*config::producer_repetitions;
}
/**
* Transitions the current header state into the next header state given the supplied signed block header.
......@@ -43,7 +51,8 @@ namespace eosio { namespace chain {
block_header_state result(*this);
result.id = h.id();
result.producer_to_last_produced[h.producer] = h.block_num();
result.block_num = h.block_num();
result.producer_to_last_produced[h.producer] = result.block_num;
result.dpos_last_irreversible_blocknum = result.calc_dpos_last_irreversible();
result.blockroot_merkle.append( result.id );
......@@ -65,6 +74,9 @@ namespace eosio { namespace chain {
}
if( h.new_producers ) {
EOS_ASSERT( is_start_of_round( result.block_num ), block_validate_exception,
"Producer changes may only occur at the end of a round.");
FC_ASSERT( h.new_producers->version == result.active_schedule.version + 1, "wrong producer schedule version specified" );
FC_ASSERT( result.pending_schedule.producers.size() == 0,
"cannot set new pending producers until last pending is confirmed" );
......
......@@ -2,17 +2,113 @@
namespace eosio { namespace chain {
/**
 * Perform context free validation of transaction state: checks that need no
 * chain state — action presence, authorization placement, and overflow of
 * the declared resource-usage header fields.
 */
void validate( const transaction& trx )const {
   EOS_ASSERT( !trx.actions.empty(), tx_no_action, "transaction must have at least one action" );

   // At least one context-aware action must carry an authorization.
   bool has_auth = false;
   for( const auto& a : trx.actions ) {
      if( !a.authorization.empty() ) {
         has_auth = true;
         break;
      }
   }
   EOS_ASSERT( has_auth, tx_no_auths, "transaction must have at least one authorization" );

   // Context-free actions, by definition, may not carry any authorization.
   for( const auto& cfa : trx.context_free_actions ) {
      EOS_ASSERT( cfa.authorization.empty(), cfa_irrelevant_auth,
                  "context-free actions cannot require authorization" );
   }

   // Guard against overflow when these header fields are scaled to raw units
   // (kcpu -> cpu is *1024, words -> bytes is *8).
   EOS_ASSERT( trx.max_kcpu_usage.value < UINT32_MAX / 1024UL, transaction_exception, "declared max_kcpu_usage overflows when expanded to max cpu usage" );
   EOS_ASSERT( trx.max_net_usage_words.value < UINT32_MAX / 8UL, transaction_exception, "declared max_net_usage_words overflows when expanded to max net usage" );
} /// validate
/// Verify a declared lock list is strictly sorted, which also guarantees it
/// contains no duplicates. `tag` ("read"/"write") is only used in messages.
void validate_shard_locks_unique(const vector<shard_lock>& locks, const string& tag) {
   if (locks.size() < 2) {
      return;
   }
   for (size_t i = 1; i < locks.size(); ++i) {
      const auto& prev = locks[i-1];
      const auto& cur  = locks[i];
      EOS_ASSERT(prev != cur, block_lock_exception, "${tag} lock \"${a}::${s}\" is not unique", ("tag",tag)("a",cur.account)("s",cur.scope));
      EOS_ASSERT(prev < cur, block_lock_exception, "${tag} locks are not sorted", ("tag",tag));
   }
}
// Validate the lock sets declared by one shard: each list must be strictly
// sorted/unique, a read lock may not also be taken for write, and write
// locks must not collide.
// NOTE(review): `shard_index` is used below but is not declared in this
// scope or parameter list — presumably it should be a parameter supplied by
// the caller iterating the cycle's shards; confirm against the call site.
void validate_shard_locks( const shard_summary& shard ) {
   validate_shard_locks_unique( shard.read_locks, "read" );
   validate_shard_locks_unique( shard.write_locks, "write" );

   // validate that no read_scope is used as a write scope in this cycle and that no two shards
   // share write scopes
   set<shard_lock> read_locks;
   map<shard_lock, uint32_t> write_locks;

   for (const auto& s: shard.read_locks) {
      // NOTE(review): the message references ${a} but no ("a", ...)
      // substitution is supplied here, and `write_locks[s]` will
      // default-insert an entry for s — verify both are intended.
      EOS_ASSERT(write_locks.count(s) == 0, block_concurrency_exception,
                 "shard ${i} requires read lock \"${a}::${s}\" which is locked for write by shard ${j}",
                 ("i", shard_index)("s", s)("j", write_locks[s]));
      read_locks.emplace(s);
   }

   for (const auto& s: shard.write_locks) {
      EOS_ASSERT(write_locks.count(s) == 0, block_concurrency_exception,
                 "shard ${i} requires write lock \"${a}::${s}\" which is locked for write by shard ${j}",
                 ("i", shard_index)("a", s.account)("s", s.scope)("j", write_locks[s]));
      EOS_ASSERT(read_locks.count(s) == 0, block_concurrency_exception,
                 "shard ${i} requires write lock \"${a}::${s}\" which is locked for read",
                 ("i", shard_index)("a", s.account)("s", s.scope));
      write_locks[s] = shard_index;
   }
}
// Construct a block_state from its header-derived state plus the signed
// block, de-serializing and context-free-validating every input transaction.
block_state::block_state( block_header_state h, signed_block_ptr b )
:block_header_state( move(h) ), block(move(b))
{
   if( block ) {
      for( const auto& packed : b->input_transactions ) {
         // NOTE(review): this loop looks like a mis-merged diff — `id` is
         // declared twice and `input_transactions` is assigned twice, once
         // with a signed_transaction and once with a transaction_metadata_ptr.
         // Only the metadata path matches the map type declared in the
         // block_state header; the first three statements appear to be
         // pre-refactor leftovers. Confirm before building.
         auto signed_trx = packed.get_signed_transaction();
         auto id = signed_trx.id();
         input_transactions[id] = move(signed_trx);
         auto meta_ptr = std::make_shared<transaction_metadata>( packed, chain_id_type() );

         /** perform context-free validation of transactions */
         const auto& trx = meta_ptr->trx();
         FC_ASSERT( time_point(trx.expiration) > header.timestamp, "transaction is expired" );
         validate( trx );

         auto id = meta_ptr->id;
         input_transactions[id] = move(meta_ptr);
      }
   }
}
// NOTE(review): everything from here down to the stray closing braces below
// is orphaned by the mis-merge above — the constructor has already closed,
// so these region/cycle/shard checks sit outside any function. They
// presumably belong at the end of the constructor's `if( block )` branch.
FC_ASSERT( block->regions.size() >= 1, "must be at least one region" );

/// regions must be listed in order
for( uint32_t i = 1; i < block->regions.size(); ++i )
   FC_ASSERT( block->regions[i-1].region < block->regions[i].region );

trace = std::make_shared<block_trace>();

/// reserve region_trace
for( uint32_t r = 0; r < block->regions.size(); ++r ) {
   FC_ASSERT( block->regions[r].cycles.size() >= 1, "must be at least one cycle" );
   /// reserve cycle traces
   for( uint32_t c = 0; c < block->regions[r].cycles.size(); c++ ) {
      // NOTE(review): message says "shard" but the condition re-checks
      // cycles.size(); and the inner loop bound indexes cycles[c][s] with
      // the loop's own variable s — both look like WIP placeholders.
      FC_ASSERT( block->regions[r].cycles.size() >= 1, "must be at least one shard" );
      /// reserve shard traces
      for( uint32_t s = 0; s < block->regions[r].cycles[c][s].size(); s++ ) {
         // FC_ASSERT( block->regions[r].cycles.size() >= 1, "must be at least one trx" ); ///
         //validate_shard_locks( block->.... )
         /// reserve transaction trace...
      }
   }
}
} // end if block
}

} } /// eosio::chain
#include <eosio/chain/controller.hpp>
#include <chainbase/database.hpp>
#include <eosio/chain/block_summary_object.hpp>
#include <eosio/chain/global_property_object.hpp>
#include <eosio/chain/contracts/contract_table_objects.hpp>
#include <eosio/chain/action_objects.hpp>
#include <eosio/chain/generated_transaction_object.hpp>
#include <eosio/chain/transaction_object.hpp>
#include <eosio/chain/producer_object.hpp>
#include <eosio/chain/permission_link_object.hpp>
#include <eosio/chain/resource_limits.hpp>
namespace eosio { namespace chain {
using resource_limits::resource_limits_manager;
struct pending_state {
pending_state( database::session&& s )
:_db_session(s){}
......@@ -14,27 +27,300 @@ struct pending_state {
struct controller_impl {
chainbase::database db;
block_log blog;
optional<pending_state> pending;
block_state_ptr head;
fork_database fork_db;
wasm_interface wasmif;
resource_limits_manager resource_limits;
controller::config conf;
controller& self;
wasm_interface _wasm_interface;
controller_impl() {   // NOTE(review): leftover pre-refactor default constructor row from a mis-merged diff; the real constructor follows immediately below.
// Wire up the database, block log, wasm runtime and resource-limits manager
// from the supplied configuration, then reset all reversible state.
controller_impl( const controller::config& cfg, controller& s )
:db( cfg.shared_memory_dir,
     cfg.read_only ? database::read_only : database::read_write,
     cfg.shared_memory_size ),
 blog( cfg.block_log_dir ),
 wasmif( cfg.wasm_runtime ),
 resource_limits( db ),
 conf( cfg ),
 self( s )
{
   initialize_indicies();

   /**
    * The undoable state contains state transitions from blocks
    * in the fork database that could be reversed. Because this
    * is a new startup and the fork database is empty, we must
    * unwind that pending state. This state will be regenerated
    * when we catch up to the head block later.
    */
   clear_all_undo();

   initialize_fork_db();
}
~controller_impl() {
   // Discard any half-built pending block before flushing shared memory
   // state to disk.
   pending.reset();
   db.flush();
}
// Register every chainbase multi-index the controller touches. Must run
// before any reads/writes against these tables; resource_limits installs
// its own indices last. (Name keeps the original "indicies" spelling since
// callers depend on it.)
void initialize_indicies() {
   db.add_index<account_index>();
   db.add_index<permission_index>();
   db.add_index<permission_usage_index>();
   db.add_index<permission_link_index>();
   db.add_index<action_permission_index>();

   db.add_index<contracts::table_id_multi_index>();
   db.add_index<contracts::key_value_index>();
   db.add_index<contracts::index64_index>();
   db.add_index<contracts::index128_index>();
   db.add_index<contracts::index256_index>();
   db.add_index<contracts::index_double_index>();

   db.add_index<global_property_multi_index>();
   db.add_index<dynamic_global_property_multi_index>();
   db.add_index<block_summary_multi_index>();
   db.add_index<transaction_multi_index>();
   db.add_index<generated_transaction_multi_index>();
   db.add_index<producer_multi_index>();
   db.add_index<scope_sequence_multi_index>();

   resource_limits.initialize_database();
}
// Throw away the in-progress pending block, if any. Presumably the database
// session held by pending_state rolls back on destruction — confirm against
// pending_state::_db_session semantics.
void abort_pending_block() {
   pending.reset();
}
// Discard every reversible (undoable) database revision, leaving the
// database at its last irreversible state.
void clear_all_undo() {
   // Rewind the database to the last irreversible block
   db.with_write_lock([&] {
      db.undo_all();
      // NOTE(review): the message contains no ${rev}/${head_block}
      // placeholders even though both values are captured — consider
      // embedding them in the text.
      FC_ASSERT(db.revision() == self.head_block_num(),
                "Chainbase revision does not match head block num",
                ("rev", db.revision())("head_block", self.head_block_num()));
   });
}
/**
 * The fork database needs an initial block_state to be set before
 * it can accept any new blocks. This initial block state can be found
 * in the database (whose head block state should be irreversible) or
 * it would be the genesis state.
 */
void initialize_fork_db() {
   // TODO(review): not yet implemented in this stage of the refactor.
}
// Public entry point: apply a block while holding the database write lock
// and with no partially-applied pending transactions outstanding.
block_state_ptr push_block( const signed_block_ptr& b ) {
   return without_pending_transactions( [&](){
      return db.with_write_lock( [&](){
         return push_block_impl( b );
      });
   });
}

void abort_pending_block();  // NOTE(review): stray re-declaration — abort_pending_block is already defined above; likely a mis-merged diff remnant.
/**
 * Add the block to the fork database and apply it. When the block lands on
 * a different fork than the current head, pop back to the common ancestor
 * and replay the new branch; if any block of the new branch fails, mark the
 * remainder of that branch invalid, restore the original branch, and
 * rethrow the failure.
 *
 * @return the block_state that is now the head of the fork database
 */
block_state_ptr push_block_impl( const signed_block_ptr& b ) {
   auto head_state = fork_db.add( b );

   /// check to see if we entered a fork
   if( head_state->header.previous != head_block_id() ) {
      auto branches = fork_db.fetch_branch_from(head_state->id, head_block_id());

      // rewind to the common ancestor of the two branches
      while (head_block_id() != branches.second.back()->header.previous)
         pop_block();

      /** apply all blocks from new fork */
      for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr) {
         optional<fc::exception> except;
         try {
            apply_block( *ritr );
         }
         catch (const fc::exception& e) { except = e; }
         if (except) {
            wlog("exception thrown while switching forks ${e}", ("e",except->to_detail_string()));

            // mark the offending block and everything after it on this fork invalid
            while (ritr != branches.first.rend() ) {
               fork_db.set_validity( *ritr, false );
               ++ritr;
            }

            // pop all blocks from the bad fork
            while( head_block_id() != branches.second.back()->header.previous )
               pop_block();

            // restore the branch we were on before the switch
            // (fix: renamed from `ritr`, which shadowed the outer iterator)
            for( auto origitr = branches.second.rbegin(); origitr != branches.second.rend(); ++origitr ) {
               apply_block( (*origitr) );
            }
            throw *except;
         } // end if exception
      } /// end for each block in branch
      return head_state;
   } /// end if fork

   try {
      apply_block( head_state );
   } catch ( const fc::exception& e ) {
      elog("Failed to push new block:\n${e}", ("e", e.to_detail_string()));
      fork_db.set_validity( head_state, false );
      throw;
   }

   // Fix: the non-fork path previously fell off the end of a non-void
   // function (undefined behavior) — return the new head state.
   return head_state;
} /// push_block_impl
// Whether block application should be bounded by a wall-clock deadline
// (see apply_block). Hard-coded off at this stage of the refactor.
bool should_enforce_runtime_limits()const {
   return false;
}
/// Verify the transaction's declared net-usage commitment (0 = unlimited)
/// is large enough to cover the minimum usage it actually requires.
void validate_net_usage( const transaction& trx, uint32_t min_net_usage ) {
   const uint32_t net_usage_limit = trx.max_net_usage_words.value * 8; // overflow checked in validate_transaction_without_state
   const bool fits = (net_usage_limit == 0) || (min_net_usage <= net_usage_limit);
   EOS_ASSERT( fits,
               transaction_exception,
               "Packed transaction and associated data does not fit into the space committed to by the transaction's header! [usage=${usage},commitment=${commit}]",
               ("usage", min_net_usage)("commit", net_usage_limit));
} /// validate_net_usage
void finalize_block( const block_trace& trace )
{ try {
const auto& b = trace.block;
update_global_properties( b );
update_global_dynamic_data( b );
update_signing_producer(signing_producer, b);
create_block_summary(b);
clear_expired_transactions();
update_last_irreversible_block();
resource_limits.process_account_limit_updates();
const auto& chain_config = self.get_global_properties().configuration;
_resource_limits.set_block_parameters(
{EOS_PERCENT(chain_config.max_block_cpu_usage, chain_config.target_block_cpu_usage_pct), chain_config.max_block_cpu_usage, config::block_cpu_usage_average_window_ms / config::block_interval_ms, 1000, {99, 100}, {1000, 999}},
{EOS_PERCENT(chain_config.max_block_net_usage, chain_config.target_block_net_usage_pct), chain_config.max_block_net_usage, config::block_size_average_window_ms / config::block_interval_ms, 1000, {99, 100}, {1000, 999}}
);
} FC_CAPTURE_AND_RETHROW() }
void clear_expired_transactions() {
//Look for expired transactions in the deduplication list, and remove them.
auto& transaction_idx = _db.get_mutable_index<transaction_multi_index>();
const auto& dedupe_index = transaction_idx.indices().get<by_expiration>();
while( (!dedupe_index.empty()) && (head_block_time() > fc::time_point(dedupe_index.begin()->expiration) ) ) {
transaction_idx.remove(*dedupe_index.begin());
}
// Look for expired transactions in the pending generated list, and remove them.
// TODO: expire these by sending error to handler
auto& generated_transaction_idx = _db.get_mutable_index<generated_transaction_multi_index>();
const auto& generated_index = generated_transaction_idx.indices().get<by_expiration>();
while( (!generated_index.empty()) && (head_block_time() > generated_index.begin()->expiration) ) {
_destroy_generated_transaction(*generated_index.begin());
}
}
bool should_check_tapos()const { return true; } ///< TaPoS verification is unconditionally enabled at this refactor stage
void validate_tapos( const transaction& trx )const {
if( !should_check_tapos() ) return;
const auto& tapos_block_summary = _db.get<block_summary_object>((uint16_t)trx.ref_block_num);
//Verify TaPoS block summary has correct ID prefix, and that this block's time is not past the expiration
EOS_ASSERT(trx.verify_reference_block(tapos_block_summary.block_id), invalid_ref_block_exception,
"Transaction's reference block did not match. Is this transaction from a different fork?",
("tapos_summary", tapos_block_summary));
}
/// Validate and execute a block already present in the fork database inside
/// an undo session; the session is pushed (kept) only if every step succeeds.
void apply_block( block_state_ptr bstate ) {
   // Fix: use the members declared on this class (`db`, `conf`) in place of
   // the undeclared `_db` / `cfg`.
   auto session = db.start_undo_session(true);

   optional<fc::time_point> processing_deadline;
   if( should_enforce_runtime_limits() ) {
      processing_deadline = fc::time_point::now() + conf.limits.max_push_block_us;
   }

   auto& next_block_trace = bstate->trace;
   next_block_trace->implicit_transactions.emplace_back( get_on_block_transaction() );

   /** these validation steps are independent of processing order, in theory they could be
    * performed in parallel */
   for( const auto& item : bstate->input_transactions ) {
      const auto& trx = item.second->trx();
      validate_tapos( trx );
      validate_net_usage( trx, item.second->billable_packed_size );
   }

   // Fix: regions live on the signed block (`bstate->block->regions` —
   // `block` is a signed_block_ptr), and apply_region takes the region
   // index, the trace, and the block state.
   for( uint32_t region_index = 0; region_index < bstate->block->regions.size(); ++region_index ) {
      apply_region( region_index, *next_block_trace, *bstate );
   }

   // Fix: next_block_trace is a block_trace_ptr, so dereference before use.
   FC_ASSERT( bstate->header.action_mroot == next_block_trace->calculate_action_merkle_root(),
              "action merkle root does not match" );

   finalize_block( *next_block_trace );

   fork_db.set_validity( bstate, true );
   // Fix: applied_block is the signal declared on the controller (`self`).
   self.applied_block( *next_block_trace ); /// emit signal to plugins before exiting undo state
   session.push();
} /// apply_block
// Apply every cycle of one region of the block, in order.
// NOTE(review): this reads `r.cycles_summary` while apply_cycle below
// indexes `.cycles` — one of the two member names is presumably stale from
// the refactor; confirm against the signed_block definition.
void apply_region( uint32_t region_index, block_trace& b_trace, const block_state& bstate ) {
   const auto& r = bstate.block->regions[region_index];
   EOS_ASSERT(!r.cycles_summary.empty(), tx_empty_region,"region[${r_index}] has no cycles", ("r_index",region_index));
   for( uint32_t cycle_index = 0; cycle_index < r.cycles_summary.size(); cycle_index++ ) {
      apply_cycle( region_index, cycle_index, b_trace, bstate );
   }
}
// Apply every shard of one cycle, then settle the cycle's RAM usage deltas.
void apply_cycle( uint32_t region_index, uint32_t cycle_index, block_trace& b_trace, const block_state& bstate ) {
   const auto& c = bstate.block->regions[region_index].cycles[cycle_index];
   for( uint32_t shard_index = 0; shard_index < c.size(); ++shard_index ) {
      apply_shard( region_index, cycle_index, shard_index, b_trace, bstate );
   }
   resource_limits.synchronize_account_ram_usage();
   // NOTE(review): `c_trace` is not declared in this scope and
   // `_apply_cycle_trace` is not defined in the visible class — this line
   // will not compile; it looks like a leftover from the pre-refactor
   // chain_controller. Confirm and remove or rewire.
   _apply_cycle_trace(c_trace);
}
// Execute one shard of a cycle and verify the read/write locks it actually
// used match the lock sets declared for it in the block.
void apply_shard( uint32_t region_index,
                  uint32_t cycle_index,
                  uint32_t shard_index,
                  block_trace& b_trace, const block_state& bstate ) {
   shard_trace& s_trace = b_trace.region_traces[region_index].cycle_traces[cycle_index].shard_traces[shard_index];
   const auto& shard = bstate.block->regions[region_index].cycles[cycle_index][shard_index];
   EOS_ASSERT(!shard.empty(), tx_empty_shard,"region[${r_index}] cycle[${c_index}] shard[${s_index}] is empty",
              ("r_index",region_index)("c_index",cycle_index)("s_index",shard_index));

   flat_set<shard_lock> used_read_locks;
   flat_set<shard_lock> used_write_locks;

   // NOTE(review): `shard.transitions` looks like a typo for the shard's
   // transaction receipts, and with apply_transaction still stubbed out the
   // used_read_locks/used_write_locks sets are never populated — the two
   // equality asserts below will fire for any shard that declares locks.
   // Revisit once transaction application is wired in.
   for( const auto& receipt : shard.transitions ) {
      // apply_transaction( ... );
   }

   EOS_ASSERT( boost::equal( used_read_locks, shard.read_locks ),
               block_lock_exception, "Read locks for executing shard: ${s} do not match those listed in the block",
               ("s", shard_index));
   EOS_ASSERT( boost::equal( used_write_locks, shard.write_locks ),
               block_lock_exception, "Write locks for executing shard: ${s} do not match those listed in the block",
               ("s", shard_index));

   s_trace.finalize_shard();
}
};
controller::controller():my( new controller_impl() )  // NOTE(review): leftover pre-refactor constructor row (no body) — dead text from a mis-merged diff; the real constructor follows.
// Construct the controller and its pimpl from the supplied configuration.
controller::controller( const controller::config& cfg )
:my( new controller_impl( cfg, *this ) )
{
}
......@@ -42,11 +328,19 @@ controller::~controller() {
}
/// Read-only access to the underlying chainbase database.
const chainbase::database& controller::db()const { return my->db; }
/// On startup, compare the database head against the block log; if the
/// database is behind the log it is inconsistent, so replay from the log.
void controller::startup() {
   auto head = my->blog.read_head();
   // Fix: `bock_num()` was a typo for `block_num()`.
   if( head && head_block_num() < head->block_num() ) {
      // Fix: corrected "inconsistant" spelling in the log message.
      wlog( "\nDatabase in inconsistent state, replaying block log..." );
      replay();
   }
}
// NOTE(review): duplicate definition of controller::db() — it is already
// defined above; keeping both is an ODR violation and will not link.
// Mis-merged diff remnant.
const chainbase::database& controller::db()const { return my->db; }

block_state_ptr controller::push_block( const signed_block_ptr& b ) {  // NOTE(review): this signature row is duplicated (old/new diff rows); only one should survive.
// Forward block application to the implementation object.
block_state_ptr controller::push_block( const signed_block_ptr& b ) {
   return my->push_block( b );
}
transaction_trace controller::push_transaction( const signed_transaction& t ) {
......
......@@ -24,6 +24,7 @@ struct block_header_state {
uint32_t calc_dpos_last_irreversible()const;
bool is_active_producer( account_name n )const;
producer_key scheduled_producer( block_timestamp_type t )const;
bool is_start_of_round( uint32_t block_num )const;
};
......
......@@ -7,10 +7,10 @@ namespace eosio { namespace chain {
/// A signed block together with the header-derived state needed to validate
/// it. Produced by applying a signed block on top of its parent's
/// block_header_state.
struct block_state : public block_header_state {
   block_state( block_header_state h, signed_block_ptr b );

   // Fix: the mis-merged diff left both the old members (a second
   // `signed_block_ptr block;`, a `map<...,signed_transaction>` and a
   // commented-out trace) and the new ones, which cannot compile. Keep only
   // the new members, which match the block_state constructor.
   signed_block_ptr block;   ///< the full signed block this state derives from
   /// de-duplicated, deserialized input transactions keyed by id
   map<transaction_id_type,transaction_metadata_ptr> input_transactions;
   bool validated = false;   ///< whether this block has passed full validation
   block_trace_ptr trace;    ///< execution trace populated while applying the block
};
typedef std::shared_ptr<block_state> block_state_ptr;
......
......@@ -25,14 +25,14 @@ namespace eosio { namespace chain {
contracts::genesis_state_type genesis;
runtime_limits limits;
wasm_interface::vm_type wasm_runtime = config::default_wasm_runtime;
wasm_interface::vm_type wasm_runtime = config::default_wasm_runtime;
};
controller();
controller( const config& cfg );
~controller();
void startup( const config& cfg );
void startup();
/**
* Starts a new pending block session upon which new transactions can
......@@ -47,6 +47,8 @@ namespace eosio { namespace chain {
const chainbase::database& db()const;
uint32_t head_block_num();
signal<void(const block_trace&)> applied_block;
signal<void(const signed_block&)> applied_irreversible_block;
......
......@@ -25,7 +25,7 @@ namespace eosio { namespace chain {
fork_database();
~fork_database();
block_state_ptr get_block(const block_id_type& id)const;
block_state_ptr get_block(const block_id_type& id)const;
// vector<block_state_ptr> get_blocks_by_number(uint32_t n)const;
/**
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册