Commit 6ce813b5 authored by Matt Witherspoon

Make all wasm instances share the same linear memory

Previously, each wasm instance (smart contract instance) would allocate 12GB of virtual address space for itself. While not as dire as how table memory used to work, this would still exhaust the virtual address space after an alarmingly small number of contracts.

Since execution is single threaded in this release, for now every wasm instance shares the same 12GB. In the future this can be extended to a “thread pool of memories”.
Parent f6fb4f94
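The scheme is easiest to see in isolation. Below is a minimal C++ sketch using hypothetical names (shared_linear_memory, contract_image, prepare), not the actual eosio types: one process-wide linear memory, plus a per-contract snapshot of its initialized data segments that is copied back in before every invocation.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

constexpr std::size_t wasm_page_size = std::size_t(1) << 16; // 64KiB wasm page

// One linear memory shared by every contract instance. In the real runtime this
// is a single large reserved virtual-address range; one wasm page is enough to
// illustrate the reset/restore cycle.
struct shared_linear_memory {
    std::vector<std::uint8_t> bytes = std::vector<std::uint8_t>(wasm_page_size, 0);
};

// A clean copy of one contract's initialized data segments, captured once when
// the contract is compiled (code_info::mem_image plays this role in the diff).
struct contract_image {
    std::vector<std::uint8_t> mem_image;
};

// Before a contract runs: wipe whatever the previous contract left behind,
// then lay this contract's initial data segments back down.
void prepare(shared_linear_memory& mem, const contract_image& image) {
    std::memset(mem.bytes.data(), 0, mem.bytes.size());
    std::memcpy(mem.bytes.data(), image.mem_image.data(), image.mem_image.size());
}

The diff below implements this split: code_info::mem_image holds the snapshot, prepare_wasm_instance restores it when an instance is checked out, and instantiateModule hands every module the single MemoryInstance::theMemoryInstance.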
@@ -125,13 +125,12 @@ namespace eosio { namespace chain {
* the instance handed out to other threads
*/
struct code_info {
code_info( size_t mem_end, vector<char>&& mem_image )
:mem_end(mem_end),mem_image(std::forward<vector<char>>(mem_image))
code_info( vector<char>&& mem_image )
:mem_image(std::forward<vector<char>>(mem_image))
{}
// a clean image of the memory used to sanitize things on checkin
size_t mem_start = 0;
size_t mem_end = 1<<16;
vector<char> mem_image;
// all existing instances of this code
@@ -215,7 +214,6 @@ namespace eosio { namespace chain {
// time to compile a brand new (maybe first) copy of this code
Module* module = new Module();
ModuleInstance* instance = nullptr;
size_t mem_end = 0;
vector<char> mem_image;
try {
@@ -228,19 +226,22 @@ namespace eosio { namespace chain {
instance = instantiateModule(*module, std::move(link_result.resolvedImports));
FC_ASSERT(instance != nullptr);
MemoryInstance* current_memory = Runtime::getDefaultMemory(instance);
if(current_memory) {
char *mem_ptr = &memoryRef<char>(current_memory, 0);
const auto allocated_memory = Runtime::getDefaultMemorySize(instance);
for (uint64_t i = 0; i < allocated_memory; ++i) {
if (mem_ptr[i])
mem_end = i + 1;
}
mem_image.resize(mem_end);
memcpy(mem_image.data(), mem_ptr, mem_end);
//populate the module's data segments into a vector so the initial state can be
// restored on each invocation
//Be warned, this may need to be revisited when module imports make sense. The
// code won't handle data segments that initialize an imported memory, which I think
// is valid.
for(const DataSegment& data_segment : module->dataSegments) {
FC_ASSERT(data_segment.baseOffset.type == InitializerExpression::Type::i32_const);
FC_ASSERT(module->memories.defs.size());
const U32 base_offset = data_segment.baseOffset.i32;
const Uptr memory_size = (module->memories.defs[0].type.size.min << IR::numBytesPerPageLog2);
if(base_offset >= memory_size || base_offset + data_segment.data.size() > memory_size)
FC_THROW_EXCEPTION(wasm_execution_error, "WASM data segment outside of valid memory range");
if(base_offset + data_segment.data.size() > mem_image.size())
mem_image.resize(base_offset + data_segment.data.size(), 0x00);
memcpy(mem_image.data() + base_offset, data_segment.data.data(), data_segment.data.size());
}
} catch (...) {
pending_error = std::current_exception();
}
@@ -249,7 +250,7 @@ namespace eosio { namespace chain {
// grab the lock and put this in the cache as unavailable
with_lock(_cache_lock, [&,this]() {
// find or create a new entry
auto iter = _cache.emplace(code_id, code_info(mem_end, std::move(mem_image))).first;
auto iter = _cache.emplace(code_id, code_info(std::move(mem_image))).first;
iter->second.instances.emplace_back(std::make_unique<wasm_cache::entry>(instance, module));
pending_result = optional_entry_ref(*iter->second.instances.back().get());
@@ -284,15 +285,7 @@ namespace eosio { namespace chain {
* @param entry - the entry to return
*/
void return_entry(const digest_type& code_id, wasm_cache::entry& entry) {
// sanitize by resetting the memory that may now be dirty
auto& info = (*fetch_info(code_id)).get();
if(getDefaultMemory(entry.instance)) {
char* memstart = &memoryRef<char>( getDefaultMemory(entry.instance), 0 );
memset( memstart + info.mem_end, 0, ((1<<16) - info.mem_end) );
memcpy( memstart, info.mem_image.data(), info.mem_end);
}
resetGlobalInstances(entry.instance);
// under a lock, put this entry back in the available instances side of the instances vector
with_lock(_cache_lock, [&,this](){
// walk the vector and find this entry
@@ -311,6 +304,20 @@ namespace eosio { namespace chain {
});
}
//initialize the memory for a cache entry
wasm_cache::entry& prepare_wasm_instance(wasm_cache::entry& wasm_cache_entry, const digest_type& code_id) {
resetGlobalInstances(wasm_cache_entry.instance);
MemoryInstance* memory_instance = getDefaultMemory(wasm_cache_entry.instance);
if(memory_instance) {
resetMemory(memory_instance, wasm_cache_entry.module->memories.defs[0].type);
const code_info& info = (*fetch_info(code_id)).get();
char* memstart = &memoryRef<char>(getDefaultMemory(wasm_cache_entry.instance), 0);
memcpy(memstart, info.mem_image.data(), info.mem_image.size());
}
return wasm_cache_entry;
}
// mapping of digest to an entry for the code
map<digest_type, code_info> _cache;
std::mutex _cache_lock;
@@ -329,16 +336,14 @@ namespace eosio { namespace chain {
// see if there is an available entry in the cache
auto result = _my->try_fetch_entry(code_id);
if (result) {
return (*result).get();
wasm_cache::entry& wasm_cache_entry = (*result).get();
return _my->prepare_wasm_instance(wasm_cache_entry, code_id);
}
return _my->fetch_entry(code_id, wasm_binary, wasm_binary_size);
return _my->prepare_wasm_instance(_my->fetch_entry(code_id, wasm_binary, wasm_binary_size), code_id);
}
void wasm_cache::checkin(const digest_type& code_id, entry& code ) {
MemoryInstance* default_mem = Runtime::getDefaultMemory(code.instance);
if(default_mem)
Runtime::shrinkMemory(default_mem, Runtime::getMemoryNumPages(default_mem) - 1);
_my->return_entry(code_id, code);
}
@@ -217,6 +217,7 @@ namespace Runtime
RUNTIME_API void runInstanceStartFunc(ModuleInstance* moduleInstance);
RUNTIME_API void resetGlobalInstances(ModuleInstance* moduleInstance);
RUNTIME_API void resetMemory(MemoryInstance* memory, IR::MemoryType& newMemoryType);
// Gets an object exported by a ModuleInstance by name.
RUNTIME_API ObjectInstance* getInstanceExport(ModuleInstance* moduleInstance,const std::string& name);
@@ -65,6 +65,8 @@ namespace Runtime
{
if(memories[memoryIndex] == this) { memories.erase(memories.begin() + memoryIndex); break; }
}
theMemoryInstance = nullptr;
}
bool isAddressOwnedByMemory(U8* address)
@@ -86,6 +88,16 @@ namespace Runtime
return Uptr(memory->type.size.max);
}
void resetMemory(MemoryInstance* memory, MemoryType& newMemoryType) {
memory->type.size.min = 1;
if(shrinkMemory(memory, memory->numPages - 1) == -1)
causeException(Exception::Cause::outOfMemory);
memset(memory->baseAddress, 0, 1<<IR::numBytesPerPageLog2);
memory->type = newMemoryType;
if(growMemory(memory, memory->type.size.min - 1) == -1)
causeException(Exception::Cause::outOfMemory);
}
Iptr growMemory(MemoryInstance* memory,Uptr numNewPages)
{
const Uptr previousNumPages = memory->numPages;
@@ -28,6 +28,8 @@ namespace Runtime
};
}
MemoryInstance* MemoryInstance::theMemoryInstance = nullptr;
ModuleInstance* instantiateModule(const IR::Module& module,ImportBindings&& imports)
{
ModuleInstance* moduleInstance = new ModuleInstance(
@@ -72,9 +74,11 @@ namespace Runtime
}
for(const MemoryDef& memoryDef : module.memories.defs)
{
auto memory = createMemory(memoryDef.type);
if(!memory) { causeException(Exception::Cause::outOfMemory); }
moduleInstance->memories.push_back(memory);
if(!MemoryInstance::theMemoryInstance) {
MemoryInstance::theMemoryInstance = createMemory(memoryDef.type);
if(!MemoryInstance::theMemoryInstance) { causeException(Exception::Cause::outOfMemory); }
}
moduleInstance->memories.push_back(MemoryInstance::theMemoryInstance);
}
// Find the default memory and table for the module.
@@ -100,32 +104,10 @@ namespace Runtime
|| table->elements.size() - baseOffset < tableSegment.indices.size())
{ causeException(Exception::Cause::invalidSegmentOffset); }
}
for(auto& dataSegment : module.dataSegments)
{
MemoryInstance* memory = moduleInstance->memories[dataSegment.memoryIndex];
const Value baseOffsetValue = evaluateInitializer(moduleInstance,dataSegment.baseOffset);
errorUnless(baseOffsetValue.type == ValueType::i32);
const U32 baseOffset = baseOffsetValue.i32;
const Uptr numMemoryBytes = (memory->numPages << IR::numBytesPerPageLog2);
if(baseOffset > numMemoryBytes
|| numMemoryBytes - baseOffset < dataSegment.data.size())
{ causeException(Exception::Cause::invalidSegmentOffset); }
}
// Copy the module's data segments into the module's default memory.
for(const DataSegment& dataSegment : module.dataSegments)
{
MemoryInstance* memory = moduleInstance->memories[dataSegment.memoryIndex];
const Value baseOffsetValue = evaluateInitializer(moduleInstance,dataSegment.baseOffset);
errorUnless(baseOffsetValue.type == ValueType::i32);
const U32 baseOffset = baseOffsetValue.i32;
assert(baseOffset + dataSegment.data.size() <= (memory->numPages << IR::numBytesPerPageLog2));
memcpy(memory->baseAddress + baseOffset,dataSegment.data.data(),dataSegment.data.size());
}
//Previously, the module instantiation would write into the memoryInstance here. Don't do that
//since the memoryInstance is shared across all moduleInstances and we could be compiling
//a new instance while another instance is running
// Instantiate the module's global definitions.
for(const GlobalDef& globalDef : module.globals.defs)
@@ -90,6 +90,8 @@ namespace Runtime
MemoryInstance(const MemoryType& inType): GCObject(ObjectKind::memory), type(inType), baseAddress(nullptr), numPages(0), endOffset(0), reservedBaseAddress(nullptr), reservedNumPlatformPages(0) {}
~MemoryInstance() override;
static MemoryInstance* theMemoryInstance;
};
// An instance of a WebAssembly global.