After `docker-compose up`, two services named nodeos and walletd will be started. The nodeos service exposes ports 8888 and 9876 to the host. The walletd service does not expose any port to the host; it is only accessible to cleos when running cleos inside the walletd container, as described in the "Execute cleos commands" section.
After `docker-compose up -d`, two services named `nodeosd` and `keosd` will be started. The nodeosd service exposes ports 8888 and 9876 to the host. The keosd service does not expose any port to the host; it is only accessible to cleos when running cleos inside the keosd container, as described in the "Execute cleos commands" section.
### Execute cleos commands
...
...
@@ -54,7 +57,7 @@ After `docker-compose up`, two services named nodeos and walletd will be started
You can run the `cleos` commands via a bash alias.
```bash
alias cleos='docker-compose exec walletd /opt/eosio/bin/cleos -H nodeos'
alias cleos='docker-compose exec keosd /opt/eosio/bin/cleos -H nodeosd'
controller.validate_uniqueness(trx);// TODO: Move this out of here when we have concurrent shards to somewhere we can check for conflicts between shards.
EOS_ASSERT(trx.max_net_usage_words.value<0x20000000,transaction_exception,"overflow of max_net_usage_words");
EOS_ASSERT(trx.max_kcpu_usage.value<0x400000,transaction_exception,"overflow of max_kcpu_usage");
EOS_ASSERT(trx.max_kcpu_usage.value<UINT32_MAX/1024UL,transaction_exception,"declared max_kcpu_usage overflows when expanded to max cpu usage");
EOS_ASSERT(trx.max_net_usage_words.value<UINT32_MAX/8UL,transaction_exception,"declared max_net_usage_words overflows when expanded to max net usage");
}FC_CAPTURE_AND_RETHROW((trx))}
...
...
@@ -1943,6 +1960,9 @@ transaction_trace chain_controller::_apply_transaction( transaction_metadata& me
returnresult;
}catch(...){
if(meta.is_implicit){
try{
throw;
}FC_CAPTURE_AND_LOG((meta.id));
transaction_traceresult(meta.id);
result.status=transaction_trace::hard_fail;
returnresult;
...
...
@@ -2009,7 +2029,7 @@ transaction_trace chain_controller::_apply_error( transaction_metadata& meta ) {
staticconstuint64_toverhead=overhead_per_row_per_index_ram_bytes*2;///< overhead for 2x indices internal-key and code,scope,table
staticconstuint64_tvalue=44+overhead;///< 36 bytes for constant size fields + overhead
};
template<>
structbillable_size<contracts::key_value_object>{
staticconstuint64_toverhead=overhead_per_row_per_index_ram_bytes*2;///< overhead for potentially single-row table, 2x indices internal-key and primary key
staticconstuint64_tvalue=32+8+4+overhead;///< 32 bytes for our constant size fields, 8 for pointer to vector data, 4 bytes for a size of vector + overhead
};
template<>
structbillable_size<contracts::index64_object>{
staticconstuint64_toverhead=overhead_per_row_per_index_ram_bytes*3;///< overhead for potentially single-row table, 3x indices internal-key, primary key and primary+secondary key
staticconstuint64_toverhead=overhead_per_row_per_index_ram_bytes*3;///< overhead for potentially single-row table, 3x indices internal-key, primary key and primary+secondary key
staticconstuint64_toverhead=overhead_per_row_per_index_ram_bytes*3;///< overhead for potentially single-row table, 3x indices internal-key, primary key and primary+secondary key
staticconstuint64_toverhead=overhead_per_row_per_index_ram_bytes*3;///< overhead for potentially single-row table, 3x indices internal-key, primary key and primary+secondary key
staticconstuint64_toverhead=overhead_per_row_per_index_ram_bytes*5;///< overhead for 5x indices internal-key, txid, expiration, delay, sender_id
staticconstuint64_tvalue=96+4+overhead;///< 96 bytes for our constant size fields, 4 bytes for a varint for packed_trx size and 96 bytes of implementation overhead
// this is enough iterations for the average to reach/exceed the target (triggering congestion handling) and then the iterations to contract down to the min
// subtracting 1 for the iteration that pulls double duty as reaching/exceeding the target and starting congestion handling