Commit 9b3c9643 authored by F fengjiayi


Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into modify_readers_to_fit_parallel_executor
@@ -244,11 +244,11 @@ function(cc_test TARGET_NAME)
cmake_parse_arguments(cc_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
add_executable(${TARGET_NAME} ${cc_test_SRCS})
# Support linking flags: --whole-archive (Linux) / -force_load (MacOS)
- target_circle_link_libraries(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main paddle_memory gtest gflags glog)
+ target_circle_link_libraries(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main memory gtest gflags glog)
if("${cc_test_DEPS}" MATCHES "ARCHIVE_START")
list(REMOVE_ITEM cc_test_DEPS ARCHIVE_START ARCHIVE_END)
endif()
- add_dependencies(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main paddle_memory gtest gflags glog)
+ add_dependencies(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main memory gtest gflags glog)
add_test(NAME ${TARGET_NAME}
COMMAND ${TARGET_NAME} ${cc_test_ARGS}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
@@ -311,8 +311,8 @@ function(nv_test TARGET_NAME)
set(multiValueArgs SRCS DEPS)
cmake_parse_arguments(nv_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
cuda_add_executable(${TARGET_NAME} ${nv_test_SRCS})
- target_link_libraries(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main paddle_memory gtest gflags glog)
- add_dependencies(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main paddle_memory gtest gflags glog)
+ target_link_libraries(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main memory gtest gflags glog)
+ add_dependencies(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main memory gtest gflags glog)
add_test(${TARGET_NAME} ${TARGET_NAME})
endif()
endfunction(nv_test)
@@ -387,8 +387,8 @@ function(hip_test TARGET_NAME)
endif()
add_executable(${TARGET_NAME} ${_cmake_options} ${_generated_files} ${_sources})
set_target_properties(${TARGET_NAME} PROPERTIES LINKER_LANGUAGE HIP)
- target_link_libraries(${TARGET_NAME} ${hip_test_DEPS} paddle_gtest_main paddle_memory gtest gflags)
- add_dependencies(${TARGET_NAME} ${hip_test_DEPS} paddle_gtest_main paddle_memory gtest gflags)
+ target_link_libraries(${TARGET_NAME} ${hip_test_DEPS} paddle_gtest_main memory gtest gflags)
+ add_dependencies(${TARGET_NAME} ${hip_test_DEPS} paddle_gtest_main memory gtest gflags)
add_test(${TARGET_NAME} ${TARGET_NAME})
endif()
endfunction(hip_test)
...
@@ -7,9 +7,9 @@ cc_test(ddim_test SRCS ddim_test.cc DEPS ddim)
nv_test(dim_test SRCS dim_test.cu DEPS ddim)
if(WITH_GPU)
- nv_library(tensor SRCS tensor.cc tensor_util.cu DEPS ddim place paddle_memory device_context framework_proto)
+ nv_library(tensor SRCS tensor.cc tensor_util.cu DEPS ddim place memory device_context framework_proto)
else()
- cc_library(tensor SRCS tensor.cc tensor_util.cc DEPS ddim place paddle_memory device_context framework_proto)
+ cc_library(tensor SRCS tensor.cc tensor_util.cc DEPS ddim place memory device_context framework_proto)
endif()
cc_test(tensor_test SRCS tensor_test.cc DEPS tensor)
@@ -21,9 +21,9 @@ endif()
cc_test(eigen_test SRCS eigen_test.cc DEPS tensor)
- nv_test(mixed_vector_test SRCS mixed_vector_test.cu DEPS place paddle_memory device_context init)
+ nv_test(mixed_vector_test SRCS mixed_vector_test.cu DEPS place memory device_context init)
cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor framework_proto recordio)
- cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor paddle_memory)
+ cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor memory)
nv_test(lod_tensor_gpu_test SRCS lod_tensor_test.cu DEPS lod_tensor init)
cc_library(reader SRCS reader.cc DEPS lod_tensor ddim)
...
@@ -15,7 +15,6 @@ limitations under the License. */
#include "paddle/fluid/framework/scope.h"
#include <memory>  // for unique_ptr
- #include <mutex>  // for call_once
#include <set>
#include "glog/logging.h"
#include "paddle/fluid/framework/threadpool.h"
@@ -39,6 +38,7 @@ Scope::~Scope() {
}
Scope& Scope::NewScope() const {
+ std::unique_lock<std::mutex> lock(mutex_);
kids_.push_back(new Scope(this));
return *kids_.back();
}
@@ -92,6 +92,7 @@ std::vector<std::string> Scope::LocalVarNames() const {
}
void Scope::DeleteScope(Scope* scope) {
+ std::unique_lock<std::mutex> lock(mutex_);
auto it = std::find(this->kids_.begin(), this->kids_.end(), scope);
PADDLE_ENFORCE(it != this->kids_.end(), "Cannot find %p as kid scope", scope);
this->kids_.erase(it);
@@ -103,7 +104,7 @@ void Scope::DeleteScope(Scope* scope) {
}
}
- void Scope::EraseVars(std::vector<std::string>& var_names) {
+ void Scope::EraseVars(const std::vector<std::string>& var_names) {
std::set<std::string> var_set(var_names.begin(), var_names.end());
for (auto it = vars_.begin(); it != vars_.end();) {
if (var_set.find(it->first) != var_set.end()) {
...
@@ -15,6 +15,7 @@ limitations under the License. */
#pragma once
#include <list>
+ #include <mutex>  // NOLINT
#include <string>
#include <unordered_map>
#include <vector>
@@ -51,7 +52,7 @@ class Scope {
/// Create a variable with a scope-unique name.
Variable* Var(std::string* name = nullptr);
- void EraseVars(std::vector<std::string>& var_names);
+ void EraseVars(const std::vector<std::string>& var_names);
/// Find a variable in the scope or any of its ancestors. Returns
/// nullptr if cannot find.
@@ -88,6 +89,9 @@ class Scope {
Scope const* parent_{nullptr};
DISABLE_COPY_AND_ASSIGN(Scope);
+ private:
+ mutable std::mutex mutex_;
};
}  // namespace framework
}  // namespace paddle
- set(FLUID_CORE_MODULES proto_desc paddle_memory lod_tensor executor prune init)
+ set(FLUID_CORE_MODULES proto_desc memory lod_tensor executor prune init)
cc_library(paddle_fluid_api
SRCS io.cc
...
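Note on the scope.cc and scope.h hunks above: NewScope and DeleteScope now take a lock on the new mutable mutex_ because, under the parallel executor, several threads may create and destroy children of the same scope concurrently, and std::list is not safe for concurrent mutation. A minimal standalone sketch of the same locking pattern (the Node class and its names are illustrative only, not Paddle's Scope):

#include <list>
#include <mutex>
#include <thread>
#include <vector>

class Node {
 public:
  // Guard every mutation of kids_ so concurrent callers cannot corrupt the list.
  Node* NewChild() {
    std::unique_lock<std::mutex> lock(mutex_);
    kids_.push_back(new Node());
    return kids_.back();
  }
  void DeleteChild(Node* child) {
    std::unique_lock<std::mutex> lock(mutex_);
    kids_.remove(child);
    delete child;
  }

 private:
  std::list<Node*> kids_;
  mutable std::mutex mutex_;
};

int main() {
  Node root;
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i) {
    workers.emplace_back([&root] {
      for (int j = 0; j < 1000; ++j) {
        Node* c = root.NewChild();   // safe even when other threads do the same
        root.DeleteChild(c);
      }
    });
  }
  for (auto& t : workers) t.join();
}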
add_subdirectory(detail)
- cc_library(memory SRCS memory.cc DEPS place enforce)
+ cc_library(malloc SRCS malloc.cc DEPS buddy_allocator place enforce)
cc_library(memcpy SRCS memcpy.cc DEPS place)
- cc_library(paddle_memory
+ cc_library(memory
DEPS
- memory
- memcpy
- meta_data
- meta_cache
- memory_block
- buddy_allocator
- system_allocator)
+ malloc
+ memcpy)
- cc_test(memory_test SRCS memory_test.cc DEPS place paddle_memory)
+ cc_test(malloc_test SRCS malloc_test.cc DEPS malloc)
#if (WITH_GPU)
- # nv_test(pinned_memory_test SRCS pinned_memory_test.cu DEPS place paddle_memory)
+ # nv_test(pinned_memory_test SRCS pinned_memory_test.cu DEPS place memory)
#endif()
+ cc_library(memory_block SRCS memory_block.cc memory_block_desc.cc meta_cache.cc)
if(${WITH_GPU})
nv_library(system_allocator SRCS system_allocator.cc DEPS gflags cpu_info gpu_info)
else(${WITH_GPU})
@@ -6,10 +8,4 @@ endif(${WITH_GPU})
cc_test(system_allocator_test SRCS system_allocator_test.cc DEPS system_allocator)
- cc_library(meta_data SRCS meta_data.cc)
- cc_library(meta_cache SRCS meta_cache.cc)
- cc_library(memory_block SRCS memory_block.cc)
- cc_library(buddy_allocator SRCS buddy_allocator.cc DEPS glog)
+ cc_library(buddy_allocator SRCS buddy_allocator.cc DEPS memory_block system_allocator glog)
@@ -46,7 +46,8 @@ inline size_t align(size_t size, size_t alignment) {
void* BuddyAllocator::Alloc(size_t unaligned_size) {
// adjust allocation alignment
- size_t size = align(unaligned_size + sizeof(Metadata), min_chunk_size_);
+ size_t size =
+     align(unaligned_size + sizeof(MemoryBlock::Desc), min_chunk_size_);
// acquire the allocator lock
std::lock_guard<std::mutex> lock(mutex_);
@@ -103,7 +104,7 @@ void BuddyAllocator::Free(void* p) {
return;
}
- block->mark_as_free(cache_);
+ block->mark_as_free(&cache_);
total_used_ -= block->total_size(cache_);
total_free_ += block->total_size(cache_);
@@ -122,7 +123,7 @@ void BuddyAllocator::Free(void* p) {
right_buddy));
// merge its right buddy to the block
- block->merge(cache_, right_buddy);
+ block->merge(&cache_, right_buddy);
}
}
@@ -139,7 +140,7 @@ void BuddyAllocator::Free(void* p) {
left_buddy->total_size(cache_), left_buddy));
// merge the block to its left buddy
- left_buddy->merge(cache_, block);
+ left_buddy->merge(&cache_, block);
block = left_buddy;
}
}
@@ -163,13 +164,13 @@ size_t BuddyAllocator::Used() { return total_used_; }
void* BuddyAllocator::SystemAlloc(size_t size) {
size_t index = 0;
- void* p = system_allocator_->Alloc(index, size);
+ void* p = system_allocator_->Alloc(&index, size);
VLOG(10) << "Allocated " << p << " from system allocator.";
if (p == nullptr) return nullptr;
- static_cast<MemoryBlock*>(p)->init(cache_, MemoryBlock::HUGE_CHUNK, index,
+ static_cast<MemoryBlock*>(p)->init(&cache_, MemoryBlock::HUGE_CHUNK, index,
size, nullptr, nullptr);
return static_cast<MemoryBlock*>(p)->data();
@@ -187,14 +188,14 @@ BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool() {
// Allocate a new maximum sized block
size_t index = 0;
- void* p = system_allocator_->Alloc(index, max_chunk_size_);
+ void* p = system_allocator_->Alloc(&index, max_chunk_size_);
if (p == nullptr) return pool_.end();
VLOG(10) << "Creating and inserting new block " << p
<< " from system allocator";
- static_cast<MemoryBlock*>(p)->init(cache_, MemoryBlock::FREE_CHUNK, index,
+ static_cast<MemoryBlock*>(p)->init(&cache_, MemoryBlock::FREE_CHUNK, index,
max_chunk_size_, nullptr, nullptr);
// gpu fallback allocation
@@ -238,11 +239,11 @@ void* BuddyAllocator::SplitToAlloc(BuddyAllocator::PoolSet::iterator it,
VLOG(10) << "Split block (" << block << ", " << block->total_size(cache_)
<< ") into";
- block->split(cache_, size);
+ block->split(&cache_, size);
VLOG(10) << "Left block (" << block << ", " << block->total_size(cache_)
<< ")";
- block->set_type(cache_, MemoryBlock::ARENA_CHUNK);
+ block->set_type(&cache_, MemoryBlock::ARENA_CHUNK);
// the rest of memory if exist
if (block->has_right_buddy(cache_)) {
...
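Note on the buddy_allocator.cc hunks above: cache_ and index are now passed as pointers (&cache_, &index) instead of non-const references, following the Google C++ style rule that mutable output parameters should be pointers so the mutation is visible at the call site. A tiny self-contained illustration of the two calling conventions (the function names here are made up for the sketch):

#include <cstddef>
#include <iostream>

// Before: the output parameter is a non-const reference; the call site
// gives no hint that `index` will be modified.
void AllocByRef(std::size_t& index, std::size_t size) { index = size / 2; }

// After: the output parameter is a pointer; the caller must write `&index`,
// which makes the mutation explicit at the call site.
void AllocByPtr(std::size_t* index, std::size_t size) { *index = size / 2; }

int main() {
  std::size_t index = 0;
  AllocByRef(index, 8);   // mutation is invisible where the call is made
  AllocByPtr(&index, 8);  // `&index` signals an output parameter
  std::cout << index << "\n";
}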
@@ -14,18 +14,18 @@ limitations under the License. */
#pragma once
- #include "paddle/fluid/memory/detail/meta_cache.h"
- #include "paddle/fluid/memory/detail/meta_data.h"
+ #include <mutex>  // NOLINT
+ #include <set>
+ #include <tuple>
+ #include <unordered_map>
+ #include <vector>
+ #include "paddle/fluid/memory/detail/memory_block.h"
#include "paddle/fluid/memory/detail/system_allocator.h"
#include "paddle/fluid/platform/assert.h"
#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/gpu_info.h"
- #include <mutex>
- #include <set>
- #include <unordered_map>
- #include <vector>
namespace paddle {
namespace memory {
namespace detail {
...
@@ -13,143 +13,142 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/memory/detail/memory_block.h"
- #include "paddle/fluid/memory/detail/meta_cache.h"
- #include "paddle/fluid/memory/detail/meta_data.h"
#include "paddle/fluid/platform/assert.h"
namespace paddle {
namespace memory {
namespace detail {
- void MemoryBlock::init(MetadataCache& cache, Type t, size_t index, size_t size,
+ void MemoryBlock::init(MetadataCache* cache, Type t, size_t index, size_t size,
void* left_buddy, void* right_buddy) {
- cache.store(this, Metadata(t, index, size - sizeof(Metadata), size,
-                            static_cast<MemoryBlock*>(left_buddy),
-                            static_cast<MemoryBlock*>(right_buddy)));
+ cache->save(
+     this, MemoryBlock::Desc(t, index, size - sizeof(MemoryBlock::Desc), size,
+                             static_cast<MemoryBlock*>(left_buddy),
+                             static_cast<MemoryBlock*>(right_buddy)));
}
- MemoryBlock::Type MemoryBlock::type(MetadataCache& cache) const {
+ MemoryBlock::Type MemoryBlock::type(const MetadataCache& cache) const {
return cache.load(this).type;
}
- size_t MemoryBlock::size(MetadataCache& cache) const {
+ size_t MemoryBlock::size(const MetadataCache& cache) const {
return cache.load(this).size;
}
+ size_t MemoryBlock::index(const MetadataCache& cache) const {
+ return cache.load(this).index;
+ }
- size_t MemoryBlock::total_size(MetadataCache& cache) const {
+ size_t MemoryBlock::total_size(const MetadataCache& cache) const {
return cache.load(this).total_size;
}
+ bool MemoryBlock::has_left_buddy(const MetadataCache& cache) const {
+ return left_buddy(cache) != nullptr;
+ }
+ bool MemoryBlock::has_right_buddy(const MetadataCache& cache) const {
+ return right_buddy(cache) != nullptr;
+ }
- MemoryBlock* MemoryBlock::left_buddy(MetadataCache& cache) const {
+ MemoryBlock* MemoryBlock::left_buddy(const MetadataCache& cache) const {
return cache.load(this).left_buddy;
}
- MemoryBlock* MemoryBlock::right_buddy(MetadataCache& cache) const {
+ MemoryBlock* MemoryBlock::right_buddy(const MetadataCache& cache) const {
return cache.load(this).right_buddy;
}
- void MemoryBlock::split(MetadataCache& cache, size_t size) {
+ void MemoryBlock::split(MetadataCache* cache, size_t size) {
// make sure the split fits
- PADDLE_ASSERT(total_size(cache) >= size);
+ PADDLE_ASSERT(total_size(*cache) >= size);
// bail out if there is no room for another partition
- if (total_size(cache) - size <= sizeof(Metadata)) {
+ if (total_size(*cache) - size <= sizeof(MemoryBlock::Desc)) {
return;
}
// find the position of the split
void* right_partition = reinterpret_cast<uint8_t*>(this) + size;
- size_t remaining_size = total_size(cache) - size;
+ size_t remaining_size = total_size(*cache) - size;
// Add the new block as a buddy
- auto metadata = cache.load(this);
+ auto metadata = cache->load(this);
// Write the metadata for the new block
auto new_block_right_buddy = metadata.right_buddy;
- cache.store(
-     static_cast<MemoryBlock*>(right_partition),
-     Metadata(FREE_CHUNK, index(cache), remaining_size - sizeof(Metadata),
-              remaining_size, this, new_block_right_buddy));
+ cache->save(static_cast<MemoryBlock*>(right_partition),
+             MemoryBlock::Desc(FREE_CHUNK, index(*cache),
+                               remaining_size - sizeof(MemoryBlock::Desc),
+                               remaining_size, this, new_block_right_buddy));
metadata.right_buddy = static_cast<MemoryBlock*>(right_partition);
- metadata.size = size - sizeof(Metadata);
+ metadata.size = size - sizeof(MemoryBlock::Desc);
metadata.total_size = size;
- cache.store(this, metadata);
+ cache->save(this, metadata);
// Write metadata for the new block's right buddy
if (new_block_right_buddy != nullptr) {
- auto buddy_metadata = cache.load(new_block_right_buddy);
+ auto buddy_metadata = cache->load(new_block_right_buddy);
buddy_metadata.left_buddy = static_cast<MemoryBlock*>(right_partition);
- cache.store(new_block_right_buddy, buddy_metadata);
+ cache->save(new_block_right_buddy, buddy_metadata);
}
}
- void MemoryBlock::merge(MetadataCache& cache, MemoryBlock* right_buddy) {
+ void MemoryBlock::merge(MetadataCache* cache, MemoryBlock* right_buddy) {
// only free blocks can be merged
- PADDLE_ASSERT(type(cache) == FREE_CHUNK);
- PADDLE_ASSERT(right_buddy->type(cache) == FREE_CHUNK);
+ PADDLE_ASSERT(type(*cache) == FREE_CHUNK);
+ PADDLE_ASSERT(right_buddy->type(*cache) == FREE_CHUNK);
- auto metadata = cache.load(this);
+ auto metadata = cache->load(this);
// link this->buddy's buddy
- metadata.right_buddy = right_buddy->right_buddy(cache);
+ metadata.right_buddy = right_buddy->right_buddy(*cache);
// link buddy's buddy -> this
if (metadata.right_buddy != nullptr) {
- auto buddy_metadata = cache.load(metadata.right_buddy);
+ auto buddy_metadata = cache->load(metadata.right_buddy);
buddy_metadata.left_buddy = this;
- cache.store(metadata.right_buddy, buddy_metadata);
+ cache->save(metadata.right_buddy, buddy_metadata);
}
- metadata.size += right_buddy->total_size(cache);
- metadata.total_size += right_buddy->total_size(cache);
+ metadata.size += right_buddy->total_size(*cache);
+ metadata.total_size += right_buddy->total_size(*cache);
- cache.store(this, metadata);
- cache.store(right_buddy, Metadata(INVALID_CHUNK, 0, 0, 0, nullptr, nullptr));
+ cache->save(this, metadata);
+ cache->save(right_buddy,
+             MemoryBlock::Desc(INVALID_CHUNK, 0, 0, 0, nullptr, nullptr));
}
- void MemoryBlock::mark_as_free(MetadataCache& cache) {
+ void MemoryBlock::mark_as_free(MetadataCache* cache) {
// check for double free or corruption
- PADDLE_ASSERT(type(cache) != FREE_CHUNK);
- PADDLE_ASSERT(type(cache) != INVALID_CHUNK);
+ PADDLE_ASSERT(type(*cache) != FREE_CHUNK);
+ PADDLE_ASSERT(type(*cache) != INVALID_CHUNK);
set_type(cache, FREE_CHUNK);
}
- void MemoryBlock::set_type(MetadataCache& cache, Type t) {
- auto metadata = cache.load(this);
+ void MemoryBlock::set_type(MetadataCache* cache, Type t) {
+ auto metadata = cache->load(this);
metadata.type = t;
- cache.store(this, metadata);
+ cache->save(this, metadata);
}
- bool MemoryBlock::has_left_buddy(MetadataCache& cache) const {
- return left_buddy(cache) != nullptr;
- }
- bool MemoryBlock::has_right_buddy(MetadataCache& cache) const {
- return right_buddy(cache) != nullptr;
- }
- size_t MemoryBlock::index(MetadataCache& cache) const {
- return cache.load(this).index;
- }
void* MemoryBlock::data() const {
- return const_cast<Metadata*>(reinterpret_cast<const Metadata*>(this)) + 1;
+ return const_cast<MemoryBlock::Desc*>(
+            reinterpret_cast<const MemoryBlock::Desc*>(this)) +
+        1;
}
MemoryBlock* MemoryBlock::metadata() const {
return const_cast<MemoryBlock*>(reinterpret_cast<const MemoryBlock*>(
- reinterpret_cast<const Metadata*>(this) - 1));
+ reinterpret_cast<const MemoryBlock::Desc*>(this) - 1));
}
}  // namespace detail
...
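Note on memory_block.cc above: for CPU allocations the MemoryBlock::Desc header is stored in-band at the start of the chunk, so data() is simply the address one Desc past the header and metadata() steps back by one Desc. A simplified standalone sketch of that layout arithmetic, assuming a made-up Header type in place of the real Desc:

#include <cstddef>
#include <cstdlib>
#include <iostream>
#include <new>

struct Header {        // stands in for MemoryBlock::Desc
  std::size_t size;
  std::size_t total_size;
};

int main() {
  const std::size_t payload = 64;
  // One raw chunk holds the header followed immediately by the payload.
  void* chunk = std::malloc(sizeof(Header) + payload);
  Header* header = new (chunk) Header{payload, sizeof(Header) + payload};
  // data() equivalent: skip exactly one header.
  void* data = reinterpret_cast<Header*>(chunk) + 1;
  // metadata() equivalent: step back one header from the payload pointer.
  Header* recovered = reinterpret_cast<Header*>(data) - 1;
  std::cout << (recovered == header) << "\n";  // prints 1
  std::free(chunk);
}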
@@ -11,21 +11,21 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
- #include <cstddef>
+ #include <cstdint>
+ #include <unordered_map>
namespace paddle {
namespace memory {
namespace detail {
- // Forward Declarations
+ // Forward declaration.
class MetadataCache;
- /*! \brief A class used to interpret the contents of a memory block */
- class MemoryBlock {
- public:
+ // MemoryBlock represents Each allocated memory block, which contains
+ // MemoryBlock::Desc and the payload.
+ struct MemoryBlock {
enum Type {
FREE_CHUNK,    // memory is free and idle
ARENA_CHUNK,   // memory is being occupied
@@ -33,57 +33,96 @@ class MemoryBlock {
INVALID_CHUNK  // memory is invalid
};
- public:
- void init(MetadataCache& cache, Type t, size_t index, size_t size,
+ // init saves the MemoryBlock::Desc of the memory block in a MetadataCache.
+ // If it is a CPU memory block, the MetadataCache writes the
+ // MemoryBlock::Desc to the beginning of the block; or, if it is a GPU memory
+ // block, the MetadataCache writes the Meatadata to a std::map in
+ // the CPU.
+ void init(MetadataCache* cache, Type t, size_t index, size_t size,
void* left_buddy, void* right_buddy);
- public:
- /*! \brief The type of the allocation */
- Type type(MetadataCache& cache) const;
- /*! \brief The size of the data region */
- size_t size(MetadataCache& cache) const;
- /*! \brief An index to track the allocator */
- size_t index(MetadataCache& cache) const;
- /*! \brief The total size of the block */
- size_t total_size(MetadataCache& cache) const;
- /*! \brief Check the left buddy of the block */
- bool has_left_buddy(MetadataCache& cache) const;
- /*! \brief Check the right buddy of the block */
- bool has_right_buddy(MetadataCache& cache) const;
- /*! \brief Get the left buddy */
- MemoryBlock* left_buddy(MetadataCache& cache) const;
- /*! \brief Get the right buddy */
- MemoryBlock* right_buddy(MetadataCache& cache) const;
+ // All these accessors returns fields in the MemoryBlock::Desc of the memory
+ // block. They all need a MetadataCache instance as their first
+ // parameter because they read the MemoryBlock::Desc from the cache.
+ Type type(const MetadataCache& cache) const;
+ size_t size(const MetadataCache& cache) const;
+ size_t index(const MetadataCache& cache) const;
+ size_t total_size(const MetadataCache& cache) const;
+ bool has_left_buddy(const MetadataCache& cache) const;
+ bool has_right_buddy(const MetadataCache& cache) const;
+ MemoryBlock* left_buddy(const MetadataCache& cache) const;
+ MemoryBlock* right_buddy(const MetadataCache& cache) const;
- public:
- /*! \brief Split the allocation into left/right blocks */
- void split(MetadataCache& cache, size_t size);
- /*! \brief Merge left and right blocks together */
- void merge(MetadataCache& cache, MemoryBlock* right_buddy);
- /*! \brief Mark the allocation as free */
- void mark_as_free(MetadataCache& cache);
- /*! \brief Change the type of the allocation */
- void set_type(MetadataCache& cache, Type t);
+ // Split the allocation into left/right blocks.
+ void split(MetadataCache* cache, size_t size);
+ // Merge left and right blocks together.
+ void merge(MetadataCache* cache, MemoryBlock* right_buddy);
+ // Mark the allocation as free.
+ void mark_as_free(MetadataCache* cache);
+ // Change the type of the allocation.
+ void set_type(MetadataCache* cache, Type t);
- public:
- /*! \brief Get a pointer to the memory block's data */
void* data() const;
- /*! \brief Get a pointer to the memory block's metadata */
MemoryBlock* metadata() const;
+ // MemoryBlock::Desc describes a MemoryBlock.
+ struct Desc {
+   Desc(MemoryBlock::Type t, size_t i, size_t s, size_t ts, MemoryBlock* l,
+        MemoryBlock* r);
+   Desc();
+   // Updates guard_begin and guard_end by hashes of the Metadata object.
+   void update_guards();
+   // Checks that guard_begin and guard_end are hashes of the Metadata object.
+   bool check_guards() const;
+   // TODO(gangliao): compress this
+   size_t guard_begin = 0;
+   MemoryBlock::Type type = MemoryBlock::INVALID_CHUNK;
+   size_t index = 0;
+   size_t size = 0;
+   size_t total_size = 0;
+   MemoryBlock* left_buddy = nullptr;
+   MemoryBlock* right_buddy = nullptr;
+   size_t guard_end = 0;
+ };
+ };
+ // A cache for accessing memory block meta-data that may be expensive
+ // to access directly. This class exists to unify the
+ // MemoryBlock::Desc format between GPU and CPU allocations. It should
+ // be removed when the CPU can access all GPU allocations directly via
+ // UVM.
+ class MetadataCache {
public:
- static size_t overhead();
+ explicit MetadataCache(bool uses_gpu);
+ // Disable copying and assignment.
+ MetadataCache(const MetadataCache&) = delete;
+ MetadataCache& operator=(const MetadataCache&) = delete;
+ // Returns the MemoryBlock::Desc for a memory block. When MetadataCache is
+ // used to manage CPU memory, the MemoryBlock::Desc resides at the beginning
+ // of the memory block; when used to manage GPU memory, the
+ // Meatadata resides in CPU memory indexed by cache_.
+ MemoryBlock::Desc load(const MemoryBlock* memory_block) const;
+ // Saves the MemoryBlock::Desc of a memory block into the cache. For CPU
+ // memory block, writes the MemoryBlock::Desc to the beginning of the memory
+ // block; whereas for GPU memory, writes it to cache_.
+ void save(MemoryBlock* memory_block, const MemoryBlock::Desc& meta_data);
+ // For GPU memory block, erases its MemoryBlock::Desc from cache_.
+ void invalidate(MemoryBlock* memory_block);
+ private:
+ typedef std::unordered_map<const MemoryBlock*, MemoryBlock::Desc> MetadataMap;
+ MetadataMap cache_;
+ bool uses_gpu_;
};
}  // namespace detail
...
@@ -12,16 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
- #include "paddle/fluid/memory/detail/meta_data.h"
#include <functional>
+ #include "paddle/fluid/memory/detail/memory_block.h"
namespace paddle {
namespace memory {
namespace detail {
- Metadata::Metadata(MemoryBlock::Type t, size_t i, size_t s, size_t ts,
+ MemoryBlock::Desc::Desc(MemoryBlock::Type t, size_t i, size_t s, size_t ts,
MemoryBlock* l, MemoryBlock* r)
: type(t),
index(i),
size(s),
@@ -29,7 +29,7 @@ Metadata::Metadata(MemoryBlock::Type t, size_t i, size_t s, size_t ts,
left_buddy(l),
right_buddy(r) {}
- Metadata::Metadata()
+ MemoryBlock::Desc::Desc()
: type(MemoryBlock::INVALID_CHUNK),
index(0),
size(0),
@@ -37,32 +37,36 @@ Metadata::Metadata()
left_buddy(nullptr),
right_buddy(nullptr) {}
+ namespace {
template <class T>
- inline void hash_combine(std::size_t& seed, const T& v) {
+ inline void hash_combine(std::size_t* seed, const T& v) {
std::hash<T> hasher;
- seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
+ (*seed) ^= hasher(v) + 0x9e3779b9 + ((*seed) << 6) + ((*seed) >> 2);
}
- inline size_t hash(const Metadata* metadata, size_t initial_seed) {
+ inline size_t hash(const MemoryBlock::Desc& metadata, size_t initial_seed) {
size_t seed = initial_seed;
- hash_combine(seed, (size_t)metadata->type);
- hash_combine(seed, metadata->index);
- hash_combine(seed, metadata->size);
- hash_combine(seed, metadata->total_size);
- hash_combine(seed, metadata->left_buddy);
- hash_combine(seed, metadata->right_buddy);
+ hash_combine(&seed, static_cast<size_t>(metadata.type));
+ hash_combine(&seed, metadata.index);
+ hash_combine(&seed, metadata.size);
+ hash_combine(&seed, metadata.total_size);
+ hash_combine(&seed, metadata.left_buddy);
+ hash_combine(&seed, metadata.right_buddy);
return seed;
}
+ }  // namespace
- void Metadata::update_guards() {
- guard_begin = hash(this, 1);
- guard_end = hash(this, 2);
+ void MemoryBlock::Desc::update_guards() {
+ guard_begin = hash(*this, 1);
+ guard_end = hash(*this, 2);
}
- bool Metadata::check_guards() const {
- return guard_begin == hash(this, 1) && guard_end == hash(this, 2);
+ bool MemoryBlock::Desc::check_guards() const {
+ return guard_begin == hash(*this, 1) && guard_end == hash(*this, 2);
}
}  // namespace detail
...
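Note on update_guards/check_guards above: the descriptor stamps a hash of its own fields into guard_begin and guard_end, so a later load can detect that the in-band header was overwritten. A minimal sketch of the same guard idea using a boost-style hash_combine and a simplified struct (not the real Desc):

#include <cstddef>
#include <functional>
#include <iostream>

template <class T>
inline void hash_combine(std::size_t* seed, const T& v) {
  std::hash<T> hasher;
  (*seed) ^= hasher(v) + 0x9e3779b9 + ((*seed) << 6) + ((*seed) >> 2);
}

struct Desc {
  std::size_t guard_begin;
  std::size_t size;
  std::size_t index;
  std::size_t guard_end;

  // Combine all payload fields into one hash, starting from a given seed.
  std::size_t hash(std::size_t seed) const {
    hash_combine(&seed, size);
    hash_combine(&seed, index);
    return seed;
  }
  void update_guards() {
    guard_begin = hash(1);
    guard_end = hash(2);
  }
  bool check_guards() const {
    return guard_begin == hash(1) && guard_end == hash(2);
  }
};

int main() {
  Desc d{0, 128, 3, 0};
  d.update_guards();
  std::cout << d.check_guards() << "\n";  // 1: header is intact
  d.size = 999;                           // simulate corruption of the header
  std::cout << d.check_guards() << "\n";  // 0: guards no longer match
}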
@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
- #include "paddle/fluid/memory/detail/meta_cache.h"
#include "glog/logging.h"
#include "paddle/fluid/memory/detail/memory_block.h"
#include "paddle/fluid/platform/assert.h"
@@ -23,29 +22,28 @@ namespace detail {
MetadataCache::MetadataCache(bool uses_gpu) : uses_gpu_(uses_gpu) {}
- Metadata MetadataCache::load(const MemoryBlock* block) {
+ MemoryBlock::Desc MetadataCache::load(const MemoryBlock* block) const {
if (uses_gpu_) {
- auto existing_metadata = cache_.find(block);
- PADDLE_ASSERT(existing_metadata->second.check_guards());
- return existing_metadata->second;
+ auto existing_desc = cache_.find(block);
+ PADDLE_ASSERT(existing_desc->second.check_guards());
+ return existing_desc->second;
} else {
- auto* meta = reinterpret_cast<const Metadata*>(block);
- VLOG(10) << "Load MetaData type=" << meta->type;
- PADDLE_ASSERT(meta->check_guards());
- return *reinterpret_cast<const Metadata*>(block);
+ auto* desc = reinterpret_cast<const MemoryBlock::Desc*>(block);
+ VLOG(10) << "Load MemoryBlock::Desc type=" << desc->type;
+ PADDLE_ASSERT(desc->check_guards());
+ return *reinterpret_cast<const MemoryBlock::Desc*>(block);
}
}
- void MetadataCache::store(MemoryBlock* block,
-                           const Metadata& original_metadata) {
- auto metadata = original_metadata;
- metadata.update_guards();
+ void MetadataCache::save(MemoryBlock* block,
+                          const MemoryBlock::Desc& original_desc) {
+ auto desc = original_desc;
+ desc.update_guards();
if (uses_gpu_) {
- cache_[block] = metadata;
+ cache_[block] = desc;
} else {
- *reinterpret_cast<Metadata*>(block) = metadata;
+ *reinterpret_cast<MemoryBlock::Desc*>(block) = desc;
}
}
...
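Note on MetadataCache::load/save above: one interface covers both placements. For CPU blocks the Desc is read from and written to the start of the block itself; for GPU blocks it lives in a host-side unordered_map keyed by the device pointer, because the CPU cannot dereference device memory. A condensed sketch of that dispatch with simplified types (not the real class):

#include <cassert>
#include <cstddef>
#include <unordered_map>

struct Desc {
  int type;
  std::size_t size;
};

class DescCache {
 public:
  explicit DescCache(bool uses_gpu) : uses_gpu_(uses_gpu) {}

  Desc load(const void* block) const {
    // GPU blocks: the descriptor lives in host memory, keyed by device pointer.
    if (uses_gpu_) return cache_.at(block);
    // CPU blocks: the descriptor is stored in-band at the start of the block.
    return *reinterpret_cast<const Desc*>(block);
  }
  void save(void* block, const Desc& d) {
    if (uses_gpu_) {
      cache_[block] = d;
    } else {
      *reinterpret_cast<Desc*>(block) = d;
    }
  }

 private:
  std::unordered_map<const void*, Desc> cache_;
  bool uses_gpu_;
};

int main() {
  alignas(Desc) unsigned char cpu_block[sizeof(Desc) + 32] = {};
  DescCache cpu_cache(false);
  cpu_cache.save(cpu_block, Desc{1, 32});
  assert(cpu_cache.load(cpu_block).size == 32);

  DescCache gpu_cache(true);
  void* fake_device_ptr = reinterpret_cast<void*>(0x1000);  // never dereferenced
  gpu_cache.save(fake_device_ptr, Desc{1, 64});
  assert(gpu_cache.load(fake_device_ptr).size == 64);
}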
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/fluid/memory/detail/memory_block.h"
#include "paddle/fluid/memory/detail/meta_data.h"
#include <unordered_map>
namespace paddle {
namespace memory {
namespace detail {
/**
* \brief A cache for accessing memory block meta-data that may be expensive
* to access directly.
*
* \note This class exists to unify the metadata format between GPU and CPU
* allocations. It should be removed when the CPU can access all GPU
* allocations directly via UVM.
*/
class MetadataCache {
public:
explicit MetadataCache(bool uses_gpu);
public:
/*! \brief Load the associated metadata for the specified memory block. */
Metadata load(const MemoryBlock* memory_block);
/*! \brief Store the associated metadata for the specified memory block. */
void store(MemoryBlock* memory_block, const Metadata& meta_data);
/*! \brief Indicate that the specified metadata will no longer be used. */
void invalidate(MemoryBlock* memory_block);
public:
MetadataCache(const MetadataCache&) = delete;
MetadataCache& operator=(const MetadataCache&) = delete;
private:
bool uses_gpu_;
private:
typedef std::unordered_map<const MemoryBlock*, Metadata> MetadataMap;
private:
MetadataMap cache_;
};
} // namespace detail
} // namespace memory
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/fluid/memory/detail/memory_block.h"
#include <stddef.h>
namespace paddle {
namespace memory {
namespace detail {
class Metadata {
public:
Metadata(MemoryBlock::Type t, size_t i, size_t s, size_t ts, MemoryBlock* l,
MemoryBlock* r);
Metadata();
public:
/*! \brief Update the guards when metadata is changed */
void update_guards();
/*! \brief Check consistency to previous modification */
bool check_guards() const;
public:
// TODO(gangliao): compress this
// clang-format off
size_t guard_begin = 0;
MemoryBlock::Type type = MemoryBlock::INVALID_CHUNK;
size_t index = 0;
size_t size = 0;
size_t total_size = 0;
MemoryBlock* left_buddy = nullptr;
MemoryBlock* right_buddy = nullptr;
size_t guard_end = 0;
// clang-format on
};
} // namespace detail
} // namespace memory
} // namespace paddle
@@ -13,16 +13,16 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/memory/detail/system_allocator.h"
- #include "paddle/fluid/platform/assert.h"
- #include "paddle/fluid/platform/cpu_info.h"
- #include "paddle/fluid/platform/enforce.h"
- #include "paddle/fluid/platform/gpu_info.h"
#include <stdlib.h>    // for malloc and free
#include <sys/mman.h>  // for mlock and munlock
#include <algorithm>   // for std::max
#include "gflags/gflags.h"
+ #include "paddle/fluid/platform/assert.h"
+ #include "paddle/fluid/platform/cpu_info.h"
+ #include "paddle/fluid/platform/enforce.h"
+ #include "paddle/fluid/platform/gpu_info.h"
// If use_pinned_memory is true, CPUAllocator calls mlock, which
// returns pinned and locked memory as staging areas for data exchange
@@ -35,13 +35,13 @@ namespace paddle {
namespace memory {
namespace detail {
- void* CPUAllocator::Alloc(size_t& index, size_t size) {
+ void* CPUAllocator::Alloc(size_t* index, size_t size) {
// According to http://www.cplusplus.com/reference/cstdlib/malloc/,
// malloc might not return nullptr if size is zero, but the returned
// pointer shall not be dereferenced -- so we make it nullptr.
if (size <= 0) return nullptr;
- index = 0;  // unlock memory
+ *index = 0;  // unlock memory
void* p;
@@ -56,7 +56,7 @@ void* CPUAllocator::Alloc(size_t& index, size_t size) {
if (p != nullptr) {
if (FLAGS_use_pinned_memory) {
- index = 1;
+ *index = 1;
mlock(p, size);  // lock memory
}
}
@@ -75,7 +75,7 @@ bool CPUAllocator::UseGpu() const { return false; }
#ifdef PADDLE_WITH_CUDA
- void* GPUAllocator::Alloc(size_t& index, size_t size) {
+ void* GPUAllocator::Alloc(size_t* index, size_t size) {
// CUDA documentation doesn't explain if cudaMalloc returns nullptr
// if size is 0. We just make sure it does.
if (size <= 0) return nullptr;
@@ -93,7 +93,7 @@ void* GPUAllocator::Alloc(size_t& index, size_t size) {
}
if (result == cudaSuccess) {
- index = 0;
+ *index = 0;
gpu_alloc_size_ += size;
return p;
} else {
@@ -133,7 +133,7 @@ bool GPUAllocator::UseGpu() const { return true; }
// PINNED memory allows direct DMA transfers by the GPU to and from system
// memory. It's locked to a physical address.
- void* CUDAPinnedAllocator::Alloc(size_t& index, size_t size) {
+ void* CUDAPinnedAllocator::Alloc(size_t* index, size_t size) {
if (size <= 0) return nullptr;
// NOTE: here, we use CUDAPinnedMaxAllocSize as the maximum memory size
@@ -154,7 +154,7 @@ void* CUDAPinnedAllocator::Alloc(size_t& index, size_t size) {
cudaError_t result = cudaMallocHost(&p, size);
if (result == cudaSuccess) {
- index = 1;  // PINNED memory
+ *index = 1;  // PINNED memory
cuda_pinnd_alloc_size_ += size;
return p;
} else {
...
@@ -29,14 +29,14 @@ namespace detail {
class SystemAllocator {
public:
virtual ~SystemAllocator() {}
- virtual void* Alloc(size_t& index, size_t size) = 0;
+ virtual void* Alloc(size_t* index, size_t size) = 0;
virtual void Free(void* p, size_t size, size_t index) = 0;
virtual bool UseGpu() const = 0;
};
class CPUAllocator : public SystemAllocator {
public:
- virtual void* Alloc(size_t& index, size_t size);
+ virtual void* Alloc(size_t* index, size_t size);
virtual void Free(void* p, size_t size, size_t index);
virtual bool UseGpu() const;
};
@@ -46,7 +46,7 @@ class GPUAllocator : public SystemAllocator {
public:
explicit GPUAllocator(int gpu_id) : gpu_id_(gpu_id) {}
- virtual void* Alloc(size_t& index, size_t size);
+ virtual void* Alloc(size_t* index, size_t size);
virtual void Free(void* p, size_t size, size_t index);
virtual bool UseGpu() const;
@@ -58,7 +58,7 @@ class GPUAllocator : public SystemAllocator {
class CUDAPinnedAllocator : public SystemAllocator {
public:
- virtual void* Alloc(size_t& index, size_t size);
+ virtual void* Alloc(size_t* index, size_t size);
virtual void Free(void* p, size_t size, size_t index);
virtual bool UseGpu() const;
...
@@ -22,11 +22,11 @@ limitations under the License. */
DECLARE_bool(use_pinned_memory);
- void TestAllocator(paddle::memory::detail::SystemAllocator& a, size_t size) {
+ void TestAllocator(paddle::memory::detail::SystemAllocator* a, size_t size) {
bool freed = false;
{
size_t index;
- void* p = a.Alloc(index, size);
+ void* p = a->Alloc(&index, size);
if (size > 0) {
EXPECT_NE(p, nullptr);
} else {
@@ -36,7 +36,7 @@ void TestAllocator(paddle::memory::detail::SystemAllocator& a, size_t size) {
int* i = static_cast<int*>(p);
std::shared_ptr<int> ptr(i, [&](void* p) {
freed = true;
- a.Free(p, size, index);
+ a->Free(p, size, index);
});
}
EXPECT_TRUE(freed);
@@ -45,21 +45,21 @@ void TestAllocator(paddle::memory::detail::SystemAllocator& a, size_t size) {
TEST(CPUAllocator, NoLockMem) {
FLAGS_use_pinned_memory = false;
paddle::memory::detail::CPUAllocator a;
- TestAllocator(a, 2048);
- TestAllocator(a, 0);
+ TestAllocator(&a, 2048);
+ TestAllocator(&a, 0);
}
TEST(CPUAllocator, LockMem) {
FLAGS_use_pinned_memory = true;
paddle::memory::detail::CPUAllocator a;
- TestAllocator(a, 2048);
- TestAllocator(a, 0);
+ TestAllocator(&a, 2048);
+ TestAllocator(&a, 0);
}
#ifdef PADDLE_WITH_CUDA
TEST(GPUAllocator, Alloc) {
paddle::memory::detail::GPUAllocator a(0);
- TestAllocator(a, 2048);
- TestAllocator(a, 0);
+ TestAllocator(&a, 2048);
+ TestAllocator(&a, 0);
}
#endif
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
- #include "paddle/fluid/memory/memory.h"
+ #include "paddle/fluid/memory/malloc.h"
#include "glog/logging.h"
...
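Note on TestAllocator above: the test releases memory through a std::shared_ptr with a custom deleter, so Free() runs exactly when the pointer goes out of scope and the freed flag proves it was called. The same pattern in a self-contained form, with plain malloc/free standing in for the allocator:

#include <cstdlib>
#include <iostream>
#include <memory>

int main() {
  bool freed = false;
  {
    void* p = std::malloc(2048);
    // The custom deleter runs when the last shared_ptr owner goes away,
    // which is how the test guarantees the Free() path is exercised.
    std::shared_ptr<int> guard(static_cast<int*>(p), [&](int* q) {
      freed = true;
      std::free(q);
    });
  }
  std::cout << freed << "\n";  // prints 1
}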
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/fluid/platform/place.h"
namespace paddle {
namespace memory {
/**
* \brief Allocate memory block in one place.
*
* \param[in] place Allocation place (CPU or GPU).
* \param[in] size Allocation size.
*
* \return Allocated memory block address.
*
* \note If return nullptr, it indicates memory allocation failed
* because insufficient memory in current system. When Alloc
* function is invoked, you must check the returned memory
* address is valid or not.
*/
template <typename Place>
void* Alloc(Place place, size_t size);
/**
* \brief Free memory block in one place.
*
* \param[in] place Allocation place (CPU or GPU).
* \param[in] ptr Memory block address to free.
*
*/
template <typename Place>
void Free(Place place, void* ptr);
/**
* \brief Total size of used memory in one place.
*
* \param[in] place Allocation place (CPU or GPU).
*
*/
template <typename Place>
size_t Used(Place place);
struct Usage : public boost::static_visitor<size_t> {
size_t operator()(const platform::CPUPlace& cpu) const;
size_t operator()(const platform::CUDAPlace& gpu) const;
size_t operator()(const platform::CUDAPinnedPlace& cuda_pinned) const;
};
size_t memory_usage(const platform::Place& p);
/**
* \brief Free memory block in one place.
*
* \note In some cases, custom deleter is used to
* deallocate the memory automatically for
* std::unique_ptr<T> in tensor.h.
*
*/
template <typename T, typename Place>
class PODDeleter {
static_assert(std::is_pod<T>::value, "T must be POD");
public:
explicit PODDeleter(Place place) : place_(place) {}
void operator()(T* ptr) { Free(place_, static_cast<void*>(ptr)); }
private:
Place place_;
};
/**
* \brief Free memory block in one place does not meet POD
*
* \note In some cases, custom deleter is used to
* deallocate the memory automatically for
* std::unique_ptr<T> in tensor.h.
*
*/
template <typename T, typename Place>
class PlainDeleter {
public:
explicit PlainDeleter(Place place) : place_(place) {}
void operator()(T* ptr) { Free(place_, reinterpret_cast<void*>(ptr)); }
private:
Place place_;
};
} // namespace memory
} // namespace paddle
@@ -12,13 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
- #include "paddle/fluid/memory/memory.h"
+ #include "paddle/fluid/memory/malloc.h"
#include <unordered_map>
#include "gtest/gtest.h"
#include "paddle/fluid/memory/detail/memory_block.h"
- #include "paddle/fluid/memory/detail/meta_data.h"
#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/gpu_info.h"
#include "paddle/fluid/platform/place.h"
@@ -28,7 +27,7 @@ inline bool is_aligned(void const *p) {
}
size_t align(size_t size, paddle::platform::CPUPlace place) {
- size += sizeof(paddle::memory::detail::Metadata);
+ size += sizeof(paddle::memory::detail::MemoryBlock::Desc);
size_t alignment = paddle::platform::CpuMinChunkSize();
size_t remaining = size % alignment;
return remaining == 0 ? size : size + (alignment - remaining);
@@ -86,7 +85,7 @@ TEST(BuddyAllocator, CPUMultAlloc) {
#ifdef PADDLE_WITH_CUDA
size_t align(size_t size, paddle::platform::CUDAPlace place) {
- size += sizeof(paddle::memory::detail::Metadata);
+ size += sizeof(paddle::memory::detail::MemoryBlock::Desc);
size_t alignment = paddle::platform::GpuMinChunkSize();
size_t remaining = size % alignment;
return remaining == 0 ? size : size + (alignment - remaining);
@@ -142,7 +141,7 @@ TEST(BuddyAllocator, GPUMultAlloc) {
}
size_t align(size_t size, paddle::platform::CUDAPinnedPlace place) {
- size += sizeof(paddle::memory::detail::Metadata);
+ size += sizeof(paddle::memory::detail::MemoryBlock::Desc);
size_t alignment = paddle::platform::CUDAPinnedMinChunkSize();
size_t remaining = size % alignment;
return remaining == 0 ? size : size + (alignment - remaining);
...
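Note on the align() helpers above: they add sizeof(MemoryBlock::Desc) to the requested size and round up to a multiple of the placement's minimum chunk size, mirroring what BuddyAllocator::Alloc does before searching the pool. A standalone version with a worked example; the 64-byte header and 4096-byte minimum chunk below are assumed values for illustration only:

#include <cstddef>
#include <iostream>

// Round `size` plus a fixed header up to the next multiple of `alignment`.
std::size_t align_up(std::size_t size, std::size_t header, std::size_t alignment) {
  size += header;
  std::size_t remaining = size % alignment;
  return remaining == 0 ? size : size + (alignment - remaining);
}

int main() {
  // Asking for 1000 bytes with a 64-byte header and a 4096-byte minimum
  // chunk rounds the request up to one full 4096-byte chunk.
  std::cout << align_up(1000, 64, 4096) << "\n";  // prints 4096
}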
@@ -14,91 +14,5 @@ limitations under the License. */
#pragma once
- #include "paddle/fluid/platform/place.h"
+ #include "paddle/fluid/memory/malloc.h"
+ #include "paddle/fluid/memory/memcpy.h"
namespace paddle {
namespace memory {
/**
* \brief Allocate memory block in one place.
*
* \param[in] place Allocation place (CPU or GPU).
* \param[in] size Allocation size.
*
* \return Allocated memory block address.
*
* \note If return nullptr, it indicates memory allocation failed
* because insufficient memory in current system. When Alloc
* function is invoked, you must check the returned memory
* address is valid or not.
*/
template <typename Place>
void* Alloc(Place place, size_t size);
/**
* \brief Free memory block in one place.
*
* \param[in] place Allocation place (CPU or GPU).
* \param[in] ptr Memory block address to free.
*
*/
template <typename Place>
void Free(Place place, void* ptr);
/**
* \brief Total size of used memory in one place.
*
* \param[in] place Allocation place (CPU or GPU).
*
*/
template <typename Place>
size_t Used(Place place);
struct Usage : public boost::static_visitor<size_t> {
size_t operator()(const platform::CPUPlace& cpu) const;
size_t operator()(const platform::CUDAPlace& gpu) const;
size_t operator()(const platform::CUDAPinnedPlace& cuda_pinned) const;
};
size_t memory_usage(const platform::Place& p);
/**
* \brief Free memory block in one place.
*
* \note In some cases, custom deleter is used to
* deallocate the memory automatically for
* std::unique_ptr<T> in tensor.h.
*
*/
template <typename T, typename Place>
class PODDeleter {
static_assert(std::is_pod<T>::value, "T must be POD");
public:
explicit PODDeleter(Place place) : place_(place) {}
void operator()(T* ptr) { Free(place_, static_cast<void*>(ptr)); }
private:
Place place_;
};
/**
* \brief Free memory block in one place does not meet POD
*
* \note In some cases, custom deleter is used to
* deallocate the memory automatically for
* std::unique_ptr<T> in tensor.h.
*
*/
template <typename T, typename Place>
class PlainDeleter {
public:
explicit PlainDeleter(Place place) : place_(place) {}
void operator()(T* ptr) { Free(place_, reinterpret_cast<void*>(ptr)); }
private:
Place place_;
};
} // namespace memory
} // namespace paddle
@@ -15,7 +15,6 @@ limitations under the License. */
#include <unordered_map>
#include "paddle/fluid/memory/detail/memory_block.h"
- #include "paddle/fluid/memory/detail/meta_data.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/memory/memory.h"
...
@@ -263,7 +263,7 @@ cc_test(net_op_test SRCS net_op_test.cc DEPS net_op)
cc_test(scatter_test SRCS scatter_test.cc DEPS tensor)
cc_test(beam_search_decode_op_test SRCS beam_search_decode_op_test.cc DEPS lod_tensor)
cc_test(beam_search_op_test SRCS beam_search_op_test.cc DEPS lod_tensor beam_search_op)
- cc_test(strided_memcpy_test SRCS strided_memcpy_test.cc DEPS tensor paddle_memory)
+ cc_test(strided_memcpy_test SRCS strided_memcpy_test.cc DEPS tensor memory)
cc_test(save_load_op_test SRCS save_load_op_test.cc DEPS save_op load_op)
cc_test(save_load_combine_op_test SRCS save_load_combine_op_test.cc DEPS save_combine_op load_combine_op)
nv_test(nccl_op_test SRCS nccl_op_test.cu.cc DEPS nccl_op gpu_info device_context)
...
...@@ -13,8 +13,8 @@ ...@@ -13,8 +13,8 @@
limitations under the License. */ limitations under the License. */
#include "mkldnn.hpp" #include "mkldnn.hpp"
#include "mkldnn_activation_op.h"
#include "paddle/fluid/operators/activation_op.h" #include "paddle/fluid/operators/activation_op.h"
#include "paddle/fluid/operators/mkldnn_activation_op.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -40,18 +40,24 @@ void eltwise_forward(const ExecContext &ctx, mkldnn::algorithm algorithm, ...@@ -40,18 +40,24 @@ void eltwise_forward(const ExecContext &ctx, mkldnn::algorithm algorithm,
const T *dst_data = dst->template mutable_data<T>(ctx.GetPlace()); const T *dst_data = dst->template mutable_data<T>(ctx.GetPlace());
// get memory dim // get memory dim
PADDLE_ENFORCE(src->dims().size() == 4, PADDLE_ENFORCE(src->dims().size() == 2 || src->dims().size() == 4,
"Input dim must be with 4, i.e. NCHW"); "Input dim must be with 2 or 4");
std::vector<int> src_tz = framework::vectorize2int(src->dims()); std::vector<int> src_tz = framework::vectorize2int(src->dims());
// create memory description // create memory description
// TODO(kbinias-intel): support more formats auto data_md = src_tz.size() == 2
auto data_md = platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, ? platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
mkldnn::memory::format::nchw); mkldnn::memory::format::nc)
: platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
mkldnn::memory::format::nchw);
// create memory primitives // create memory primitives
auto src_memory = mkldnn::memory({data_md, mkldnn_engine}, (void *)src_data); auto src_memory =
auto dst_memory = mkldnn::memory({data_md, mkldnn_engine}, (void *)dst_data); mkldnn::memory({data_md, mkldnn_engine},
static_cast<void *>(const_cast<float *>(src_data)));
auto dst_memory =
mkldnn::memory({data_md, mkldnn_engine},
static_cast<void *>(const_cast<float *>(dst_data)));
auto forward_desc = mkldnn::eltwise_forward::desc( auto forward_desc = mkldnn::eltwise_forward::desc(
mkldnn::prop_kind::forward_training, algorithm, data_md, alpha, beta); mkldnn::prop_kind::forward_training, algorithm, data_md, alpha, beta);
...@@ -91,15 +97,21 @@ void eltwise_grad(const ExecContext &ctx, mkldnn::algorithm algorithm, ...@@ -91,15 +97,21 @@ void eltwise_grad(const ExecContext &ctx, mkldnn::algorithm algorithm,
std::vector<int> src_tz = framework::vectorize2int(x->dims()); std::vector<int> src_tz = framework::vectorize2int(x->dims());
// create memory description // create memory description
auto data_md = platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, auto data_md = src_tz.size() == 2
mkldnn::memory::format::nchw); ? platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
mkldnn::memory::format::nc)
: platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
mkldnn::memory::format::nchw);
// create memory primitives // create memory primitives
auto src_memory = mkldnn::memory({data_md, mkldnn_engine}, (void *)src); auto src_memory = mkldnn::memory(
{data_md, mkldnn_engine}, static_cast<void *>(const_cast<float *>(src)));
auto diff_src_memory = auto diff_src_memory =
mkldnn::memory({data_md, mkldnn_engine}, (void *)diff_src); mkldnn::memory({data_md, mkldnn_engine},
static_cast<void *>(const_cast<float *>(diff_src)));
auto diff_dst_memory = auto diff_dst_memory =
mkldnn::memory({data_md, mkldnn_engine}, (void *)diff_dst); mkldnn::memory({data_md, mkldnn_engine},
static_cast<void *>(const_cast<float *>(diff_dst)));
auto backward_desc = auto backward_desc =
mkldnn::eltwise_backward::desc(algorithm, data_md, data_md, alpha, beta); mkldnn::eltwise_backward::desc(algorithm, data_md, data_md, alpha, beta);
......
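Both the forward and backward paths above now make the same rank-based choice: 2-D tensors use the nc memory format and 4-D tensors use nchw. A hypothetical helper that captures that choice in one place; it is not part of the patch and assumes platform::MKLDNNMemDesc returns a mkldnn::memory::desc, as its uses above suggest:

// Hypothetical helper: pick the MKLDNN memory format from the tensor rank,
// exactly as the ternaries added above do. Assumes the surrounding includes.
inline mkldnn::memory::desc RankBasedMemDesc(const std::vector<int>& tz) {
  auto fmt = tz.size() == 2 ? mkldnn::memory::format::nc
                            : mkldnn::memory::format::nchw;
  return platform::MKLDNNMemDesc(tz, mkldnn::memory::f32, fmt);
}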
...@@ -5,5 +5,5 @@ if(WITH_DISTRIBUTE) ...@@ -5,5 +5,5 @@ if(WITH_DISTRIBUTE)
set_source_files_properties(serde_test.cc grpc_server_test.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) set_source_files_properties(serde_test.cc grpc_server_test.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS})
cc_test(serde_test SRCS serde_test.cc variable_response.cc DEPS grpc++_unsecure grpc_unsecure gpr cc_test(serde_test SRCS serde_test.cc variable_response.cc DEPS grpc++_unsecure grpc_unsecure gpr
cares zlib protobuf sendrecvop_grpc) cares zlib protobuf sendrecvop_grpc)
cc_test(grpc_server_test SRCS grpc_server_test.cc DEPS sendrecvop_grpc grpc++_unsecure grpc_unsecure gpr cares zlib protobuf) cc_test(grpc_server_test SRCS grpc_server_test.cc DEPS sendrecvop_grpc grpc++_unsecure grpc_unsecure gpr cares zlib protobuf executor proto_desc lookup_table_op)
endif() endif()
...@@ -138,7 +138,7 @@ bool RPCClient::AsyncPrefetchVariable(const std::string& ep, ...@@ -138,7 +138,7 @@ bool RPCClient::AsyncPrefetchVariable(const std::string& ep,
auto* var = p_scope->FindVar(in_var_name_val); auto* var = p_scope->FindVar(in_var_name_val);
::grpc::ByteBuffer req; ::grpc::ByteBuffer req;
SerializeToByteBuffer(in_var_name_val, var, *p_ctx, &req); SerializeToByteBuffer(in_var_name_val, var, *p_ctx, &req, out_var_name_val);
// var handle // var handle
VarHandle var_h; VarHandle var_h;
......
...@@ -138,39 +138,48 @@ class RequestPrefetch final : public RequestBase { ...@@ -138,39 +138,48 @@ class RequestPrefetch final : public RequestBase {
framework::Scope* scope, framework::Scope* scope,
const platform::DeviceContext* dev_ctx, const platform::DeviceContext* dev_ctx,
framework::Executor* executor, framework::Executor* executor,
framework::ProgramDesc* program, int blkid) framework::ProgramDesc* program,
framework::ExecutorPrepareContext* prefetch_ctx)
: RequestBase(service, cq, dev_ctx), : RequestBase(service, cq, dev_ctx),
responder_(&ctx_), responder_(&ctx_),
scope_(scope), scope_(scope),
executor_(executor), executor_(executor),
program_(program), program_(program),
blkid_(blkid) { prefetch_ctx_(prefetch_ctx) {
request_.reset(new VariableResponse(scope, dev_ctx_));
int method_id = static_cast<int>(detail::GrpcMethod::kPrefetchVariable); int method_id = static_cast<int>(detail::GrpcMethod::kPrefetchVariable);
service_->RequestAsyncUnary(method_id, &ctx_, &request_, &responder_, cq_, service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_,
cq_, this); cq_, cq_, this);
} }
virtual ~RequestPrefetch() {} virtual ~RequestPrefetch() {}
virtual std::string GetReqName() { return request_.varname(); } virtual std::string GetReqName() { return request_->Varname(); }
virtual void Process() { virtual void Process() {
// prefetch process... // prefetch process...
::grpc::ByteBuffer reply; ::grpc::ByteBuffer reply;
// TODO(Yancey1989): execute the Block which containers prefetch ops
VLOG(3) << "RequestPrefetch Process in"; std::string var_name = request_->OutVarname();
auto var_desc = program_->Block(0).FindVar(var_name);
framework::Scope* local_scope = &scope_->NewScope();
auto* var = local_scope->FindVar(var_name);
InitializeVariable(var, var_desc->GetType());
executor_->RunPreparedContext(prefetch_ctx_, scope_, false, false);
SerializeToByteBuffer(var_name, var, *dev_ctx_, &reply);
responder_.Finish(reply, ::grpc::Status::OK, this); responder_.Finish(reply, ::grpc::Status::OK, this);
status_ = FINISH; status_ = FINISH;
} }
protected: protected:
sendrecv::VariableMessage request_; std::shared_ptr<VariableResponse> request_;
ServerAsyncResponseWriter<::grpc::ByteBuffer> responder_; ServerAsyncResponseWriter<::grpc::ByteBuffer> responder_;
framework::Scope* scope_; framework::Scope* scope_;
framework::Executor* executor_; framework::Executor* executor_;
framework::ProgramDesc* program_; framework::ProgramDesc* program_;
framework::ExecutorPrepareContext* prefetch_ctx_;
int blkid_; int blkid_;
}; };
...@@ -268,7 +277,7 @@ void AsyncGRPCServer::TryToRegisterNewPrefetchOne() { ...@@ -268,7 +277,7 @@ void AsyncGRPCServer::TryToRegisterNewPrefetchOne() {
} }
RequestPrefetch* prefetch = RequestPrefetch* prefetch =
new RequestPrefetch(&service_, cq_prefetch_.get(), scope_, dev_ctx_, new RequestPrefetch(&service_, cq_prefetch_.get(), scope_, dev_ctx_,
executor_, program_, prefetch_blk_id_); executor_, program_, prefetch_ctx_);
VLOG(4) << "Create RequestPrefetch status:" << prefetch->Status(); VLOG(4) << "Create RequestPrefetch status:" << prefetch->Status();
} }
......
...@@ -63,6 +63,10 @@ class AsyncGRPCServer final { ...@@ -63,6 +63,10 @@ class AsyncGRPCServer final {
void SetExecutor(framework::Executor *executor) { executor_ = executor; } void SetExecutor(framework::Executor *executor) { executor_ = executor; }
void SetPrefetchPreparedCtx(framework::ExecutorPrepareContext *prepared) {
prefetch_ctx_ = prepared;
}
int GetSelectedPort() { return selected_port_; } int GetSelectedPort() { return selected_port_; }
const ReceivedMessage Get() { return this->var_recv_queue_.Pop(); } const ReceivedMessage Get() { return this->var_recv_queue_.Pop(); }
...@@ -111,6 +115,7 @@ class AsyncGRPCServer final { ...@@ -111,6 +115,7 @@ class AsyncGRPCServer final {
std::unique_ptr<std::thread> t_prefetch_; std::unique_ptr<std::thread> t_prefetch_;
int prefetch_blk_id_; int prefetch_blk_id_;
framework::ExecutorPrepareContext *prefetch_ctx_;
framework::ProgramDesc *program_; framework::ProgramDesc *program_;
framework::Executor *executor_; framework::Executor *executor_;
int selected_port_; int selected_port_;
......
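With the SetPrefetchPreparedCtx hook added above, the server no longer executes a bare block id for prefetch; the caller prepares the prefetch block once and hands the resulting ExecutorPrepareContext over before starting the service. A condensed sketch of that wiring, assuming the Executor::Prepare call used in grpc_server_test.cc further below:

// Sketch only; mirrors the server setup in grpc_server_test.cc below.
framework::Executor exe(place);
auto prepared = exe.Prepare(program, prefetch_block->ID());
rpc_service_->SetProgram(&program);
rpc_service_->SetExecutor(&exe);
rpc_service_->SetPrefetchPreparedCtx(prepared.get());
rpc_service_->RunSyncUpdate();  // prefetch requests now run the prepared block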
...@@ -20,43 +20,121 @@ limitations under the License. */ ...@@ -20,43 +20,121 @@ limitations under the License. */
#include "paddle/fluid/operators/detail/grpc_client.h" #include "paddle/fluid/operators/detail/grpc_client.h"
#include "paddle/fluid/operators/detail/grpc_server.h" #include "paddle/fluid/operators/detail/grpc_server.h"
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
namespace framework = paddle::framework; namespace framework = paddle::framework;
namespace platform = paddle::platform; namespace platform = paddle::platform;
namespace detail = paddle::operators::detail; namespace detail = paddle::operators::detail;
USE_OP(lookup_table);
std::unique_ptr<detail::AsyncGRPCServer> rpc_service_; std::unique_ptr<detail::AsyncGRPCServer> rpc_service_;
framework::BlockDesc* AppendPrefetchBlock(framework::ProgramDesc* program) {
auto root_block = program->MutableBlock(0);
auto* block = program->AppendBlock(*root_block);
framework::VariableNameMap input({{"W", {"w"}}, {"Ids", {"ids"}}});
framework::VariableNameMap output({{"Output", {"out"}}});
auto op = block->AppendOp();
op->SetType("lookup_table");
op->SetInput("W", {"w"});
op->SetInput("Ids", {"ids"});
op->SetOutput("Out", {"out"});
auto& out = *root_block->Var("out");
out.SetType(framework::proto::VarType::SELECTED_ROWS);
out.SetShape({10, 10});
return block;
}
void CreateVarsOnScope(framework::Scope* scope, platform::CPUPlace* place) {
auto w_var = scope->Var("w");
w_var->GetMutable<framework::SelectedRows>();
auto out_var = scope->Var("out");
out_var->GetMutable<framework::SelectedRows>();
auto ids_var = scope->Var("ids");
ids_var->GetMutable<framework::SelectedRows>();
}
void InitTensorsOnClient(framework::Scope* scope, platform::CPUPlace* place,
int64_t rows_numel) {
CreateVarsOnScope(scope, place);
auto ids_var = scope->Var("ids")->GetMutable<framework::SelectedRows>();
auto rows = ids_var->mutable_rows();
for (int64_t i = 0; i < rows_numel; ++i) rows->push_back(i * 2);
ids_var->mutable_value()->Resize({rows_numel, 1});
ids_var->mutable_value()->mutable_data<float>(*place);
}
void InitTensorsOnServer(framework::Scope* scope, platform::CPUPlace* place,
int64_t rows_numel) {
CreateVarsOnScope(scope, place);
auto w = scope->Var("w")->GetMutable<framework::SelectedRows>();
auto rows = w->mutable_rows();
for (int64_t i = 0; i < rows_numel; ++i) rows->push_back(i);
auto w_value = w->mutable_value();
w_value->Resize({rows_numel, 10});
auto ptr = w_value->mutable_data<float>(*place);
for (int64_t i = 0; i < w_value->numel(); ++i) {
ptr[i] = static_cast<float>(i / 10);
}
}
void StartServer(const std::string& endpoint) { void StartServer(const std::string& endpoint) {
rpc_service_.reset(new detail::AsyncGRPCServer(endpoint)); rpc_service_.reset(new detail::AsyncGRPCServer(endpoint));
framework::ProgramDesc program;
framework::Scope scope;
platform::CPUPlace place;
framework::Executor exe(place);
platform::CPUDeviceContext ctx(place);
auto* block = AppendPrefetchBlock(&program);
auto prepared = exe.Prepare(program, block->ID());
InitTensorsOnServer(&scope, &place, 10);
rpc_service_->SetProgram(&program);
rpc_service_->SetPrefetchPreparedCtx(prepared.get());
rpc_service_->SetDevCtx(&ctx);
rpc_service_->SetScope(&scope);
rpc_service_->SetExecutor(&exe);
rpc_service_->RunSyncUpdate(); rpc_service_->RunSyncUpdate();
} }
TEST(PREFETCH, CPU) { TEST(PREFETCH, CPU) {
// start up a server instance backend // start up a server instance backend
// TODO(Yancey1989): Need to start a server with optimize blocks and
// prefetch blocks.
std::thread server_thread(StartServer, "127.0.0.1:8889"); std::thread server_thread(StartServer, "127.0.0.1:8889");
sleep(2);
framework::Scope scope; framework::Scope scope;
platform::CPUPlace place; platform::CPUPlace place;
platform::CPUDeviceContext ctx(place); platform::CPUDeviceContext ctx(place);
// create var on local scope // create var on local scope
std::string in_var_name("in"); int64_t rows_numel = 5;
InitTensorsOnClient(&scope, &place, rows_numel);
std::string in_var_name("ids");
std::string out_var_name("out"); std::string out_var_name("out");
auto* in_var = scope.Var(in_var_name);
auto* in_tensor = in_var->GetMutable<framework::LoDTensor>();
in_tensor->Resize({10, 10});
VLOG(3) << "before mutable_data";
in_tensor->mutable_data<int>(place);
scope.Var(out_var_name);
VLOG(3) << "before fetch";
detail::RPCClient client; detail::RPCClient client;
client.AsyncPrefetchVariable("127.0.0.1:8889", ctx, scope, in_var_name, client.AsyncPrefetchVariable("127.0.0.1:8889", ctx, scope, in_var_name,
out_var_name); out_var_name);
client.Wait(); client.Wait();
auto var = scope.Var(out_var_name);
auto value = var->GetMutable<framework::SelectedRows>()->value();
auto ptr = value.mutable_data<float>(place);
rpc_service_->ShutDown(); rpc_service_->ShutDown();
server_thread.join(); server_thread.join();
rpc_service_.reset(nullptr); rpc_service_.reset(nullptr);
for (int64_t i = 0; i < rows_numel; ++i) {
EXPECT_EQ(ptr[0 + i * value.dims()[1]], static_cast<float>(i * 2));
}
} }
...@@ -21,7 +21,7 @@ service SendRecvService { ...@@ -21,7 +21,7 @@ service SendRecvService {
rpc SendVariable(VariableMessage) returns (VoidMessage) {} rpc SendVariable(VariableMessage) returns (VoidMessage) {}
// Argument VariableMessage for GetVariable should only contain varname. // Argument VariableMessage for GetVariable should only contain varname.
rpc GetVariable(VariableMessage) returns (VariableMessage) {} rpc GetVariable(VariableMessage) returns (VariableMessage) {}
// Prefetch variable by Ids // pre-fetch variable by given variable name and Ids
rpc PrefetchVariable(VariableMessage) returns (VariableMessage) {} rpc PrefetchVariable(VariableMessage) returns (VariableMessage) {}
} }
...@@ -67,6 +67,8 @@ message VariableMessage { ...@@ -67,6 +67,8 @@ message VariableMessage {
bytes serialized = 8; bytes serialized = 8;
// selected_rows data // selected_rows data
bytes rows = 9; bytes rows = 9;
// Look up table block execution output variable name.
string out_varname = 10;
} }
message VoidMessage {} message VoidMessage {}
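With the new out_varname field, a prefetch request names both the variable that carries the ids to look up and the variable the server should write the result into. A hedged illustration of the message a client would fill in; the variable names are examples only:

// Illustrative VariableMessage for PrefetchVariable; names are examples only.
sendrecv::VariableMessage msg;
msg.set_varname("ids");      // input variable holding the ids to look up
msg.set_out_varname("out");  // variable the prefetch block writes its result to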
...@@ -30,11 +30,9 @@ namespace detail { ...@@ -30,11 +30,9 @@ namespace detail {
void SerializeToByteBuffer(const std::string& name, framework::Variable* var, void SerializeToByteBuffer(const std::string& name, framework::Variable* var,
const platform::DeviceContext& ctx, const platform::DeviceContext& ctx,
::grpc::ByteBuffer* msg) { ::grpc::ByteBuffer* msg,
const std::string& out_name) {
using VarMsg = sendrecv::VariableMessage; using VarMsg = sendrecv::VariableMessage;
sendrecv::VariableMessage request;
std::string header;
request.AppendToString(&header);
// When using GPU, need to free the copied CPU buffer // When using GPU, need to free the copied CPU buffer
// when the ByteBuffer is destroyed // when the ByteBuffer is destroyed
// TODO(typhoonzero): add unref here, if we have dependent // TODO(typhoonzero): add unref here, if we have dependent
...@@ -52,6 +50,9 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, ...@@ -52,6 +50,9 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var,
e.WriteUint64(VarMsg::kTypeFieldNumber, 1); e.WriteUint64(VarMsg::kTypeFieldNumber, 1);
} }
if (!out_name.empty()) {
e.WriteString(VarMsg::kOutVarnameFieldNumber, out_name);
}
switch (framework::ToVarType(var->Type())) { switch (framework::ToVarType(var->Type())) {
case framework::proto::VarType_Type_LOD_TENSOR: { case framework::proto::VarType_Type_LOD_TENSOR: {
auto tensor = var->Get<framework::LoDTensor>(); auto tensor = var->Get<framework::LoDTensor>();
......
...@@ -46,7 +46,8 @@ typedef void (*DestroyCallback)(void*); ...@@ -46,7 +46,8 @@ typedef void (*DestroyCallback)(void*);
void SerializeToByteBuffer(const std::string& name, framework::Variable* var, void SerializeToByteBuffer(const std::string& name, framework::Variable* var,
const platform::DeviceContext& ctx, const platform::DeviceContext& ctx,
::grpc::ByteBuffer* msg); ::grpc::ByteBuffer* msg,
const std::string& out_varname = std::string());
void DeserializeFromByteBuffer(const ::grpc::ByteBuffer& msg, void DeserializeFromByteBuffer(const ::grpc::ByteBuffer& msg,
const platform::DeviceContext& ctx, const platform::DeviceContext& ctx,
......
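The extra out_varname parameter defaults to an empty string, so existing call sites keep working; only the prefetch path passes it, as the AsyncPrefetchVariable change above shows. A minimal sketch of both call styles, assuming msg is a ::grpc::ByteBuffer and ctx a platform::DeviceContext:

// Ordinary send: out_varname stays empty and is not written to the wire.
SerializeToByteBuffer(var_name, var, ctx, &msg);

// Prefetch: also encode the output variable name (see AsyncPrefetchVariable above).
SerializeToByteBuffer(in_var_name, var, ctx, &msg, out_var_name);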
...@@ -416,6 +416,20 @@ int VariableResponse::Parse(Source* source) { ...@@ -416,6 +416,20 @@ int VariableResponse::Parse(Source* source) {
} }
break; break;
} }
case sendrecv::VariableMessage::kOutVarnameFieldNumber: {
uint32_t length;
if ((wt != WIRETYPE_LENGTH_DELIMITED) || !input.ReadVarint32(&length)) {
return tag;
}
std::string temp;
if (!input.ReadString(&temp, length)) {
return tag;
}
meta_.set_out_varname(temp);
break;
}
default: { default: {
// Unknown tag, return unknown error. // Unknown tag, return unknown error.
......
...@@ -55,6 +55,7 @@ class VariableResponse { ...@@ -55,6 +55,7 @@ class VariableResponse {
int Parse(const ::grpc::ByteBuffer& byte_buffer); int Parse(const ::grpc::ByteBuffer& byte_buffer);
inline std::string Varname() { return meta_.varname(); } inline std::string Varname() { return meta_.varname(); }
inline std::string OutVarname() { return meta_.out_varname(); }
// should call parse first. // should call parse first.
framework::Variable* GetVar() { return scope_->FindVar(meta_.varname()); } framework::Variable* GetVar() { return scope_->FindVar(meta_.varname()); }
......
...@@ -14,6 +14,8 @@ limitations under the License. */ ...@@ -14,6 +14,8 @@ limitations under the License. */
#pragma once #pragma once
#include <algorithm>
#include <vector>
#include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/op_registry.h"
...@@ -35,7 +37,7 @@ class LoDResetKernel : public framework::OpKernel<T> { ...@@ -35,7 +37,7 @@ class LoDResetKernel : public framework::OpKernel<T> {
if (lod_t->lod().size() > 0) { if (lod_t->lod().size() > 0) {
auto y_lod = lod_t->lod(); auto y_lod = lod_t->lod();
auto last_level = y_lod[y_lod.size() - 1]; auto last_level = y_lod[y_lod.size() - 1];
PADDLE_ENFORCE_EQ(last_level.back(), in->dims()[0], PADDLE_ENFORCE_EQ((int64_t)(last_level.back()), in->dims()[0],
"Last value of `Y`'s last level LoD should be equal " "Last value of `Y`'s last level LoD should be equal "
"to the first dimension of `X`"); "to the first dimension of `X`");
out->set_lod(y_lod); out->set_lod(y_lod);
......
...@@ -42,12 +42,12 @@ ENDIF() ...@@ -42,12 +42,12 @@ ENDIF()
# memcpy depends on device_context, here add deps individually for # memcpy depends on device_context, here add deps individually for
# avoiding cycle dependencies # avoiding cycle dependencies
cc_library(device_context SRCS device_context.cc DEPS memory buddy_allocator cc_library(device_context SRCS device_context.cc DEPS malloc
system_allocator memory_block meta_data meta_cache place eigen3 ${GPU_CTX_DEPS} ${MKLDNN_CTX_DEPS}) place eigen3 ${GPU_CTX_DEPS} ${MKLDNN_CTX_DEPS})
nv_test(device_context_test SRCS device_context_test.cu DEPS device_context gpu_info) nv_test(device_context_test SRCS device_context_test.cu DEPS device_context gpu_info)
nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda) nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda)
nv_test(transform_test SRCS transform_test.cu DEPS paddle_memory place device_context) nv_test(transform_test SRCS transform_test.cu DEPS memory place device_context)
cc_library(device_tracer SRCS device_tracer.cc DEPS boost profiler_proto ${GPU_CTX_DEPS}) cc_library(device_tracer SRCS device_tracer.cc DEPS boost profiler_proto ${GPU_CTX_DEPS})
cc_library(profiler SRCS profiler.cc DEPS device_context device_tracer) cc_library(profiler SRCS profiler.cc DEPS device_context device_tracer)
......
...@@ -2,13 +2,13 @@ if(WITH_PYTHON) ...@@ -2,13 +2,13 @@ if(WITH_PYTHON)
if(WITH_AMD_GPU) if(WITH_AMD_GPU)
hip_library(paddle_pybind SHARED hip_library(paddle_pybind SHARED
SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc
DEPS pybind python backward proto_desc paddle_memory executor prune init profiler feed_fetch_method DEPS pybind python backward proto_desc memory executor prune init profiler feed_fetch_method
parallel_executor parallel_executor
${GLOB_OP_LIB}) ${GLOB_OP_LIB})
else() else()
cc_library(paddle_pybind SHARED cc_library(paddle_pybind SHARED
SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc
DEPS pybind python backward proto_desc paddle_memory executor prune init profiler feed_fetch_method DEPS pybind python backward proto_desc memory executor prune init profiler feed_fetch_method
parallel_executor parallel_executor
${GLOB_OP_LIB}) ${GLOB_OP_LIB})
if(NOT APPLE AND NOT ANDROID) if(NOT APPLE AND NOT ANDROID)
......
...@@ -6,6 +6,6 @@ if(WITH_TESTING) ...@@ -6,6 +6,6 @@ if(WITH_TESTING)
add_library(paddle_test_util STATIC TestUtil.cpp) add_library(paddle_test_util STATIC TestUtil.cpp)
add_dependencies(paddle_test_util paddle_proto ${external_project_dependencies}) add_dependencies(paddle_test_util paddle_proto ${external_project_dependencies})
if(NOT MOBILE_INFERENCE) if(NOT MOBILE_INFERENCE)
cc_library(paddle_gtest_main SRCS paddle_gtest_main.cc DEPS init paddle_memory gtest gflags) cc_library(paddle_gtest_main SRCS paddle_gtest_main.cc DEPS init memory gtest gflags)
endif() endif()
endif() endif()
...@@ -16,6 +16,7 @@ import sys ...@@ -16,6 +16,7 @@ import sys
import re import re
from graphviz import GraphPreviewGenerator from graphviz import GraphPreviewGenerator
import proto.framework_pb2 as framework_pb2 import proto.framework_pb2 as framework_pb2
from google.protobuf import text_format
_vartype2str_ = [ _vartype2str_ = [
"UNK", "UNK",
...@@ -100,7 +101,7 @@ def repr_var(vardesc): ...@@ -100,7 +101,7 @@ def repr_var(vardesc):
def pprint_program_codes(program_desc): def pprint_program_codes(program_desc):
reprs = [] reprs = []
for block_idx in range(program_desc.num_blocks()): for block_idx in range(program_desc.desc.num_blocks()):
block_desc = program_desc.block(block_idx) block_desc = program_desc.block(block_idx)
block_repr = pprint_block_codes(block_desc) block_repr = pprint_block_codes(block_desc)
reprs.append(block_repr) reprs.append(block_repr)
...@@ -127,7 +128,7 @@ def pprint_block_codes(block_desc, show_backward=False): ...@@ -127,7 +128,7 @@ def pprint_block_codes(block_desc, show_backward=False):
if type(block_desc) is not framework_pb2.BlockDesc: if type(block_desc) is not framework_pb2.BlockDesc:
block_desc = framework_pb2.BlockDesc.FromString( block_desc = framework_pb2.BlockDesc.FromString(
block_desc.serialize_to_string()) block_desc.desc.serialize_to_string())
var_reprs = [] var_reprs = []
op_reprs = [] op_reprs = []
for var in block_desc.vars: for var in block_desc.vars:
...@@ -237,13 +238,13 @@ def draw_block_graphviz(block, highlights=None, path="./temp.dot"): ...@@ -237,13 +238,13 @@ def draw_block_graphviz(block, highlights=None, path="./temp.dot"):
# draw parameters and args # draw parameters and args
vars = {} vars = {}
for var in desc.vars: for var in desc.vars:
shape = [str(i) for i in var.lod_tensor.tensor.dims] # TODO(gongwb): format the var.type
if not shape:
shape = ['null']
# create var # create var
if var.persistable: if var.persistable:
varn = graph.add_param( varn = graph.add_param(
var.name, var.type, shape, highlight=need_highlight(var.name)) var.name,
str(var.type).replace("\n", "<br />", 1),
highlight=need_highlight(var.name))
else: else:
varn = graph.add_arg(var.name, highlight=need_highlight(var.name)) varn = graph.add_arg(var.name, highlight=need_highlight(var.name))
vars[var.name] = varn vars[var.name] = varn
...@@ -268,4 +269,4 @@ def draw_block_graphviz(block, highlights=None, path="./temp.dot"): ...@@ -268,4 +269,4 @@ def draw_block_graphviz(block, highlights=None, path="./temp.dot"):
for var in op.outputs: for var in op.outputs:
add_op_link_var(opn, var, True) add_op_link_var(opn, var, True)
graph(path, show=True) graph(path, show=False)
...@@ -965,6 +965,13 @@ class Block(object): ...@@ -965,6 +965,13 @@ class Block(object):
if var.type == core.VarDesc.VarType.STEP_SCOPES: if var.type == core.VarDesc.VarType.STEP_SCOPES:
ret_var = self.create_var( ret_var = self.create_var(
name=var.name, persistable=var.persistable, type=var.type) name=var.name, persistable=var.persistable, type=var.type)
elif var.type == core.VarDesc.VarType.SELECTED_ROWS:
ret_var = self.create_var(
name=var.name,
shape=var.shape,
dtype=var.dtype,
type=var.type,
persistable=True)
else: else:
ret_var = self.create_var( ret_var = self.create_var(
name=var.name, name=var.name,
......
...@@ -83,7 +83,7 @@ class Graph(object): ...@@ -83,7 +83,7 @@ class Graph(object):
file = open(dot_path, 'w') file = open(dot_path, 'w')
file.write(self.__str__()) file.write(self.__str__())
image_path = os.path.join( image_path = os.path.join(
os.path.dirname(__file__), dot_path[:-3] + "pdf") os.path.dirname(dot_path), dot_path[:-3] + "pdf")
cmd = ["dot", "-Tpdf", dot_path, "-o", image_path] cmd = ["dot", "-Tpdf", dot_path, "-o", image_path]
subprocess.Popen( subprocess.Popen(
cmd, cmd,
...@@ -199,7 +199,7 @@ class GraphPreviewGenerator(object): ...@@ -199,7 +199,7 @@ class GraphPreviewGenerator(object):
else: else:
self.graph.show(path) self.graph.show(path)
def add_param(self, name, data_type, shape, highlight=False): def add_param(self, name, data_type, highlight=False):
label = '\n'.join([ label = '\n'.join([
'<<table cellpadding="5">', '<<table cellpadding="5">',
' <tr>', ' <tr>',
...@@ -214,11 +214,6 @@ class GraphPreviewGenerator(object): ...@@ -214,11 +214,6 @@ class GraphPreviewGenerator(object):
str(data_type), str(data_type),
' </td>' ' </td>'
' </tr>', ' </tr>',
' <tr>',
' <td>',
'[%s]' % 'x'.join(shape),
' </td>'
' </tr>',
'</table>>', '</table>>',
]) ])
return self.graph.node( return self.graph.node(
......
...@@ -535,9 +535,37 @@ class TestSwish(OpTest): ...@@ -535,9 +535,37 @@ class TestSwish(OpTest):
#--------------------test MKLDNN-------------------- #--------------------test MKLDNN--------------------
class TestMKLDNNRelu(TestRelu): class TestMKLDNNReluDim2(TestRelu):
def setUp(self): def setUp(self):
super(TestMKLDNNRelu, self).setUp() super(TestMKLDNNReluDim2, self).setUp()
self.attrs = {"use_mkldnn": True}
class TestMKLDNNTanhDim2(TestTanh):
def setUp(self):
super(TestMKLDNNTanhDim2, self).setUp()
self.attrs = {"use_mkldnn": True}
class TestMKLDNNSqrtDim2(TestSqrt):
def setUp(self):
super(TestMKLDNNSqrtDim2, self).setUp()
self.attrs = {"use_mkldnn": True}
class TestMKLDNNAbsDim2(TestAbs):
def setUp(self):
super(TestMKLDNNAbsDim2, self).setUp()
self.attrs = {"use_mkldnn": True}
class TestMKLDNNReluDim4(TestRelu):
def setUp(self):
super(TestMKLDNNReluDim4, self).setUp()
x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype("float32") x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype("float32")
# The same reason with TestAbs # The same reason with TestAbs
...@@ -549,9 +577,9 @@ class TestMKLDNNRelu(TestRelu): ...@@ -549,9 +577,9 @@ class TestMKLDNNRelu(TestRelu):
self.attrs = {"use_mkldnn": True} self.attrs = {"use_mkldnn": True}
class TestMKLDNNTanh(TestTanh): class TestMKLDNNTanhDim4(TestTanh):
def setUp(self): def setUp(self):
super(TestMKLDNNTanh, self).setUp() super(TestMKLDNNTanhDim4, self).setUp()
self.inputs = { self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32") 'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32")
...@@ -560,9 +588,9 @@ class TestMKLDNNTanh(TestTanh): ...@@ -560,9 +588,9 @@ class TestMKLDNNTanh(TestTanh):
self.attrs = {"use_mkldnn": True} self.attrs = {"use_mkldnn": True}
class TestMKLDNNSqrt(TestSqrt): class TestMKLDNNSqrtDim4(TestSqrt):
def setUp(self): def setUp(self):
super(TestMKLDNNSqrt, self).setUp() super(TestMKLDNNSqrtDim4, self).setUp()
self.inputs = { self.inputs = {
'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32") 'X': np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype("float32")
...@@ -571,9 +599,9 @@ class TestMKLDNNSqrt(TestSqrt): ...@@ -571,9 +599,9 @@ class TestMKLDNNSqrt(TestSqrt):
self.attrs = {"use_mkldnn": True} self.attrs = {"use_mkldnn": True}
class TestMKLDNNAbs(TestAbs): class TestMKLDNNAbsDim4(TestAbs):
def setUp(self): def setUp(self):
super(TestMKLDNNAbs, self).setUp() super(TestMKLDNNAbsDim4, self).setUp()
x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype("float32") x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype("float32")
# The same reason with TestAbs # The same reason with TestAbs
......
...@@ -51,7 +51,9 @@ class TestDebugger(unittest.TestCase): ...@@ -51,7 +51,9 @@ class TestDebugger(unittest.TestCase):
outputs={"Out": mul_out}, outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1}) attrs={"x_num_col_dims": 1})
print(debuger.pprint_program_codes(p.desc)) print(debuger.pprint_program_codes(p))
debuger.draw_block_graphviz(p.block(0), path="./test.dot")
if __name__ == '__main__': if __name__ == '__main__':
......