Commit e81f0228 authored by Adam and committed by Tao Luo

MKL-DNN 1.0 Update (#20162)

* MKLDNN v1.0 rebase to Paddle 1.6
test=develop

* Add hacky paddle::string::to_string() implementation

* vectorize<int64_t>() -> vectorize() cleanup
test=develop

* PADDLE_ENFORCE and void_cast fixes
test=develop

* Rebase changes
test=develop

* Cosmetics
test=develop

* Delete MKL from mkldnn.cmake
test=develop

* CMake debug commands
test=develop

* Delete MKLDNN_VERBOSE and rebase fixes
test=develop

* Rebase fixes
test=develop

* Temporarily disable int8 resnet101, vgg16 and vgg19 tests
test=develop

* Add libmkldnn.so.1 to python setup
test=develop

* Add libmkldnn.so.1 to inference_lib cmake after rebase
test=develop

* Post rebase fixes + FC int8 changes
test=develop

* Fix LRN NHWC
test=develop

* Fix NHWC conv3d
test=develop

* Windows build fix + next conv3d fix
test=develop

* Fix conv2d on AVX2 machines
test=develop
Parent 7f5d532a
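
The change repeated across most hunks below is MKL-DNN's new execution model: a 0.x primitive captured its memory objects at construction time and was submitted to an eager stream as part of a pipeline, whereas a 1.0 primitive is stateless and receives its memory arguments at execution time on a stream bound to an engine. A minimal sketch of the migration, using reorder as in the data-layout hunks; the surrounding setup is assumed for illustration and is not part of this commit:

#include <mkldnn.hpp>
#include <memory>

// Sketch of the 0.x -> 1.0 execution-model migration, assuming an already
// created reorder primitive and its source/destination memories.
void RunReorder(const mkldnn::engine &cpu_engine,
                const std::shared_ptr<mkldnn::reorder> &reorder_p,
                mkldnn::memory &src_mem, mkldnn::memory &dst_mem) {
  // MKL-DNN 0.x style (removed throughout this commit):
  //   std::vector<mkldnn::primitive> pipeline;
  //   pipeline.push_back(*reorder_p);
  //   mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait();

  // MKL-DNN 1.0 style: the stream is bound to an engine and the memory
  // arguments are supplied per execution.
  mkldnn::stream astream(cpu_engine);
  reorder_p->execute(astream, src_mem, dst_mem);
  astream.wait();
}

Primitives with more arguments take an argument map keyed by MKLDNN_ARG_* constants instead, e.g. {{MKLDNN_ARG_SRC, *src}, {MKLDNN_ARG_DST, *dst}}, as the activation, sum, LRN and batch-norm hunks below show.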
@@ -19,7 +19,7 @@ SET(MKLDNN_PREFIX_DIR ${THIRD_PARTY_PATH}/mkldnn)
SET(MKLDNN_INSTALL_DIR ${THIRD_PARTY_PATH}/install/mkldnn)
SET(MKLDNN_INC_DIR "${MKLDNN_INSTALL_DIR}/include" CACHE PATH "mkldnn include directory." FORCE)
SET(MKLDNN_REPOSITORY https://github.com/intel/mkl-dnn.git)
SET(MKLDNN_TAG aef88b7c233f48f8b945da310f1b973da31ad033)
SET(MKLDNN_TAG 518a316a8cd6deb82dc7866bc04bd0355a25c3a4)
# Introduce variables:
# * CMAKE_INSTALL_LIBDIR
@@ -35,13 +35,6 @@ SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_RPATH}" "${MKLDNN_INSTALL_DIR}/${LIBDIR
INCLUDE_DIRECTORIES(${MKLDNN_INC_DIR}) # For MKLDNN code to include internal headers.
IF(${CBLAS_PROVIDER} STREQUAL "MKLML")
SET(MKLDNN_DEPENDS ${MKLML_PROJECT})
MESSAGE(STATUS "Build MKLDNN with MKLML ${MKLML_ROOT}")
ELSE()
MESSAGE(FATAL_ERROR "Should enable MKLML when build MKLDNN")
ENDIF()
IF(NOT WIN32)
SET(MKLDNN_FLAG "-Wno-error=strict-overflow -Wno-error=unused-result -Wno-error=array-bounds")
SET(MKLDNN_FLAG "${MKLDNN_FLAG} -Wno-unused-result -Wno-unused-value")
@@ -63,7 +56,8 @@ ExternalProject_Add(
DEPENDS ${MKLDNN_DEPENDS}
PREFIX ${MKLDNN_PREFIX_DIR}
SOURCE_DIR ${MKLDNN_SOURCE_DIR}
UPDATE_COMMAND ""
BUILD_ALWAYS 1
# UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE}
@@ -77,9 +71,8 @@ ExternalProject_Add(
-DMKLROOT=${MKLML_ROOT}
-DCMAKE_C_FLAGS=${MKLDNN_CFLAG}
-DCMAKE_CXX_FLAGS=${MKLDNN_CXXFLAG}
-DWITH_TEST=OFF -DWITH_EXAMPLE=OFF
-DMKLDNN_BUILD_TESTS=OFF -DMKLDNN_BUILD_EXAMPLES=OFF
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${MKLDNN_INSTALL_DIR}
-DMKLROOT:PATH=${MKLML_ROOT}
)
if(WIN32)
SET(MKLDNN_LIB "${MKLDNN_INSTALL_DIR}/${LIBDIR}/mkldnn.lib" CACHE FILEPATH "mkldnn library." FORCE)
@@ -98,7 +91,7 @@ add_definitions(-DPADDLE_WITH_MKLDNN)
SET(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/mkldnn_dummy.c)
FILE(WRITE ${dummyfile} "const char * dummy = \"${dummyfile}\";")
ADD_LIBRARY(mkldnn STATIC ${dummyfile})
TARGET_LINK_LIBRARIES(mkldnn ${MKLDNN_LIB} ${MKLML_LIB} ${MKLML_IOMP_LIB})
TARGET_LINK_LIBRARIES(mkldnn ${MKLDNN_LIB} ${MKLML_IOMP_LIB})
ADD_DEPENDENCIES(mkldnn ${MKLDNN_PROJECT})
# copy the real so.0 lib to install dir
@@ -107,6 +100,9 @@ if(WIN32)
SET(MKLDNN_SHARED_LIB ${MKLDNN_INSTALL_DIR}/bin/mkldnn.dll)
else(WIN32)
SET(MKLDNN_SHARED_LIB ${MKLDNN_INSTALL_DIR}/libmkldnn.so.0)
SET(MKLDNN_SHARED_LIB_1 ${MKLDNN_INSTALL_DIR}/libmkldnn.so.1)
ADD_CUSTOM_COMMAND(TARGET ${MKLDNN_PROJECT} POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_LIB} ${MKLDNN_SHARED_LIB})
ADD_CUSTOM_COMMAND(TARGET ${MKLDNN_PROJECT} POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_LIB} ${MKLDNN_SHARED_LIB_1})
endif(WIN32)
@@ -84,8 +84,8 @@ function(copy_part_of_thrid_party TARGET DST)
DSTS ${dst_dir} ${dst_dir}/lib ${dst_dir}/lib)
else()
copy(${TARGET}
SRCS ${MKLDNN_INC_DIR} ${MKLDNN_SHARED_LIB}
DSTS ${dst_dir} ${dst_dir}/lib)
SRCS ${MKLDNN_INC_DIR} ${MKLDNN_SHARED_LIB} ${MKLDNN_SHARED_LIB_1}
DSTS ${dst_dir} ${dst_dir}/lib ${dst_dir}/lib)
endif()
endif()
......
@@ -105,8 +105,6 @@ void* GetDataFromTensor(const Tensor& tensor, mkldnn::memory::data_type type) {
return platform::to_void_cast(tensor.data<int8_t>());
case mkldnn::memory::data_type::u8:
return platform::to_void_cast(tensor.data<unsigned char>());
case mkldnn::memory::data_type::s16:
return platform::to_void_cast(tensor.data<int16_t>());
case mkldnn::memory::data_type::s32:
return platform::to_void_cast(tensor.data<int32_t>());
default:
@@ -134,7 +132,7 @@ void TransDataLayoutFromMKLDNN(const OpKernelType& kernel_type_for_var,
void innerTransDataLayoutFromMKLDNN(DataLayout in_layout, DataLayout out_layout,
const Tensor& in, Tensor* out,
platform::Place place) {
PADDLE_ENFORCE_NE(in.format(), MKLDNNMemoryFormat::format_undef,
PADDLE_ENFORCE_NE(in.format(), MKLDNNMemoryFormat::undef,
platform::errors::InvalidArgument(
"Input tensor format is invalid. Input tensor should "
"have specified memory format."));
@@ -151,12 +149,12 @@ void innerTransDataLayoutFromMKLDNN(DataLayout in_layout, DataLayout out_layout,
auto* dev_ctx = dynamic_cast<platform::MKLDNNDeviceContext*>(pool.Get(place));
auto& cpu_engine = dev_ctx->GetEngine();
auto in_tz = paddle::framework::vectorize<int>(in.dims());
auto in_tz = paddle::framework::vectorize<int64_t>(in.dims());
auto out_tz = in_tz;
memory::data_type in_type = ToMKLDNNDataType(in.type());
PADDLE_ENFORCE(in_type != memory::data_type::data_undef,
"Input tensor type is not supported: %s", in.type());
PADDLE_ENFORCE_NE(in_type, memory::data_type::undef,
"Input tensor type is not supported: %s", in.type());
auto in_format = platform::MKLDNNFormatForSize(in_tz.size(), in.format());
auto out_format =
@@ -167,8 +165,8 @@ void innerTransDataLayoutFromMKLDNN(DataLayout in_layout, DataLayout out_layout,
if (in_format != out_format) {
void* in_data = GetDataFromTensor(in, in_type);
const std::string key = platform::CreateKey(in_tz, in_format, out_format,
std::to_string(in_type));
const std::string key =
platform::CreateKey(in_tz, in_format, out_format, in_type);
platform::ReorderMKLDNNHandler handler(in_tz, in.type(), in_type, *dev_ctx,
cpu_engine, key);
@@ -179,9 +177,9 @@ void innerTransDataLayoutFromMKLDNN(DataLayout in_layout, DataLayout out_layout,
auto reorder_p =
handler.AcquireReorder(reorder_dst_memory_p, reorder_src_memory_p);
std::vector<mkldnn::primitive> pipeline;
pipeline.push_back(*reorder_p);
mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait();
mkldnn::stream astream(cpu_engine);
reorder_p->execute(astream, *reorder_src_memory_p, *reorder_dst_memory_p);
astream.wait();
} else {
out->ShareDataWith(in);
}
@@ -193,7 +191,7 @@ void innerTransDataLayoutFromMKLDNN(DataLayout in_layout, DataLayout out_layout,
}
out->set_layout(out_layout);
// reset format since the out tensor will be feed to non-MKLDNN OPkernel
out->set_format(MKLDNNMemoryFormat::format_undef);
out->set_format(MKLDNNMemoryFormat::undef);
}
#endif
......
@@ -59,11 +59,10 @@ inline MKLDNNDataType ToMKLDNNDataType(proto::VarType::Type type) {
{DataTypeTrait<float>::DataType(), MKLDNNDataType::f32},
{DataTypeTrait<int8_t>::DataType(), MKLDNNDataType::s8},
{DataTypeTrait<uint8_t>::DataType(), MKLDNNDataType::u8},
{DataTypeTrait<int16_t>::DataType(), MKLDNNDataType::s16},
{DataTypeTrait<int32_t>::DataType(), MKLDNNDataType::s32}};
auto iter = dict.find(static_cast<int>(type));
if (iter != dict.end()) return iter->second;
return MKLDNNDataType::data_undef;
return MKLDNNDataType::undef;
}
void innerTransDataLayoutFromMKLDNN(DataLayout in_layout, DataLayout out_layout,
......
@@ -38,9 +38,9 @@ class Tensor {
#ifdef PADDLE_WITH_MKLDNN
public:
inline mkldnn::memory::format format() const { return format_; }
inline mkldnn::memory::format_tag format() const { return format_; }
inline void set_format(const mkldnn::memory::format format) {
inline void set_format(const mkldnn::memory::format_tag format) {
format_ = format;
}
@@ -54,7 +54,7 @@ class Tensor {
* this field.
*/
mkldnn::memory::format format_ = mkldnn::memory::format::format_undef;
mkldnn::memory::format_tag format_ = mkldnn::memory::format_tag::undef;
#endif
public:
......
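
Two mechanical renames also run through the rest of the diff: mkldnn::memory::format became mkldnn::memory::format_tag (with format_undef collapsing to undef, and data_type::data_undef to data_type::undef), and memory::primitive_desc was dropped in favour of a plain memory::desc plus an engine passed straight to the memory constructor. A minimal sketch of the 1.0 construction pattern; the MKLDNNMemoryFormat alias is assumed here to be Paddle's shorthand for mkldnn::memory::format_tag:

#include <mkldnn.hpp>

// Assumed alias, matching how the kernels in this commit spell the type.
using MKLDNNMemoryFormat = mkldnn::memory::format_tag;

// 0.x: mkldnn::memory({md, eng}, data) went through memory::primitive_desc;
// 1.0: the descriptor and engine are passed to the constructor directly.
mkldnn::memory MakeMemory(const mkldnn::memory::desc &md,
                          const mkldnn::engine &eng, void *data) {
  return mkldnn::memory(md, eng, data);
}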
@@ -248,19 +248,22 @@ if(WITH_MKLDNN)
inference_analysis_api_int8_test_run(test_analyzer_int8_mobilenetv2 ${INT8_IMG_CLASS_TEST_APP} ${INT8_MOBILENETV2_MODEL_DIR} ${IMAGENET_DATA_PATH})
# resnet101 int8
set(INT8_RESNET101_MODEL_DIR "${INT8_DATA_DIR}/resnet101")
download_int8_data(${INT8_RESNET101_MODEL_DIR} "Res101_int8_model.tar.gz" )
inference_analysis_api_int8_test_run(test_analyzer_int8_resnet101 ${INT8_IMG_CLASS_TEST_APP} ${INT8_RESNET101_MODEL_DIR} ${IMAGENET_DATA_PATH})
# TODO(grygielski) Enable after MKL-DNN 1.0 merge
# set(INT8_RESNET101_MODEL_DIR "${INT8_DATA_DIR}/resnet101")
# download_int8_data(${INT8_RESNET101_MODEL_DIR} "Res101_int8_model.tar.gz" )
# inference_analysis_api_int8_test_run(test_analyzer_int8_resnet101 ${INT8_IMG_CLASS_TEST_APP} ${INT8_RESNET101_MODEL_DIR} ${IMAGENET_DATA_PATH})
# vgg16 int8
set(INT8_VGG16_MODEL_DIR "${INT8_DATA_DIR}/vgg16")
download_int8_data(${INT8_VGG16_MODEL_DIR} "VGG16_int8_model.tar.gz" )
inference_analysis_api_int8_test_run(test_analyzer_int8_vgg16 ${INT8_IMG_CLASS_TEST_APP} ${INT8_VGG16_MODEL_DIR} ${IMAGENET_DATA_PATH})
# TODO(grygielski) Enable after MKL-DNN 1.0 merge
# set(INT8_VGG16_MODEL_DIR "${INT8_DATA_DIR}/vgg16")
# download_int8_data(${INT8_VGG16_MODEL_DIR} "VGG16_int8_model.tar.gz" )
# inference_analysis_api_int8_test_run(test_analyzer_int8_vgg16 ${INT8_IMG_CLASS_TEST_APP} ${INT8_VGG16_MODEL_DIR} ${IMAGENET_DATA_PATH})
# vgg19 int8
set(INT8_VGG19_MODEL_DIR "${INT8_DATA_DIR}/vgg19")
download_int8_data(${INT8_VGG19_MODEL_DIR} "VGG19_int8_model.tar.gz" )
inference_analysis_api_int8_test_run(test_analyzer_int8_vgg19 ${INT8_IMG_CLASS_TEST_APP} ${INT8_VGG19_MODEL_DIR} ${IMAGENET_DATA_PATH})
# TODO(grygielski) Enable after MKL-DNN 1.0 merge
# set(INT8_VGG19_MODEL_DIR "${INT8_DATA_DIR}/vgg19")
# download_int8_data(${INT8_VGG19_MODEL_DIR} "VGG19_int8_model.tar.gz" )
# inference_analysis_api_int8_test_run(test_analyzer_int8_vgg19 ${INT8_IMG_CLASS_TEST_APP} ${INT8_VGG19_MODEL_DIR} ${IMAGENET_DATA_PATH})
# googlenet int8
set(INT8_GOOGLENET_MODEL_DIR "${INT8_DATA_DIR}/googlenet")
......
@@ -31,7 +31,7 @@ class ElementwiseMulOp : public ElementwiseOp {
#ifdef PADDLE_WITH_MKLDNN
static bool AreDimsAndFormatCorrect(const framework::ExecutionContext& ctx,
int simd_width,
mkldnn::memory::format x_format) {
mkldnn::memory::format_tag x_format) {
using Tensor = framework::Tensor;
using paddle::framework::vectorize;
using mkldnn::memory;
@@ -54,7 +54,7 @@ class ElementwiseMulOp : public ElementwiseOp {
if (platform::CanMKLDNNBeUsed(ctx)) {
bool can_use_avx512_kernel =
platform::MayIUse(platform::avx512f) &&
AreDimsAndFormatCorrect(ctx, 16, memory::format::nChw16c);
AreDimsAndFormatCorrect(ctx, 16, memory::format_tag::nChw16c);
if (can_use_avx512_kernel) {
return framework::OpKernelType(input_data_type, ctx.GetPlace(),
framework::DataLayout::kMKLDNN,
......
@@ -50,12 +50,14 @@ class EltwiseAddMKLDNNKernel : public framework::OpKernel<T> {
auto y_dims_untrimed = y->dims();
auto z_dims = z->dims();
mkldnn::stream astream(mkldnn_engine);
// Execute default elementwise_add operator when
// broadcast operations need to be performed.
if (x_dims != y_dims_untrimed) {
Tensor _x;
MKLDNNMemoryFormat format;
std::vector<int> src_x_tz = framework::vectorize<int>(x_dims);
auto src_x_tz = framework::vectorize<int64_t>(x_dims);
if ((src_x_tz.size() == 3 &&
x->format() != (format = MKLDNNMemoryFormat::ncw)) ||
@@ -69,8 +71,8 @@ class EltwiseAddMKLDNNKernel : public framework::OpKernel<T> {
auto out_format = platform::MKLDNNFormatForSize(
x_dims.size(), MKLDNNMemoryFormat::nchw);
const std::string key = platform::CreateKey(
src_x_tz, x->format(), out_format, std::to_string(in_type));
const std::string key =
platform::CreateKey(src_x_tz, x->format(), out_format, in_type);
platform::ReorderMKLDNNHandler handler(src_x_tz, x->type(), in_type,
dev_ctx, mkldnn_engine, key);
@@ -83,9 +85,8 @@ class EltwiseAddMKLDNNKernel : public framework::OpKernel<T> {
auto x_reorder = handler.AcquireReorder(x_memory_p, user_x_memory_p);
std::vector<primitive> pipeline;
pipeline.push_back(*x_reorder);
stream(stream::kind::eager).submit(pipeline).wait();
x_reorder->execute(astream, *user_x_memory_p, *x_memory_p);
astream.wait();
} else {
format = x->format();
_x.ShareDataWith(*x);
@@ -122,19 +123,18 @@ class EltwiseAddMKLDNNKernel : public framework::OpKernel<T> {
} else {
PADDLE_ENFORCE_EQ(x->layout(), DataLayout::kMKLDNN,
"Wrong layout set for X tensor");
PADDLE_ENFORCE_NE(x->format(), MKLDNNMemoryFormat::format_undef,
PADDLE_ENFORCE_NE(x->format(), MKLDNNMemoryFormat::undef,
"Wrong format set for X tensor");
PADDLE_ENFORCE_EQ(y->layout(), DataLayout::kMKLDNN,
"Wrong layout set for Y tensor");
PADDLE_ENFORCE_NE(y->format(), MKLDNNMemoryFormat::format_undef,
PADDLE_ENFORCE_NE(y->format(), MKLDNNMemoryFormat::undef,
"Wrong format set for Y tensor");
std::vector<int> src_x_tz = framework::vectorize<int>(x_dims);
std::vector<int> src_y_tz = framework::vectorize<int>(y_dims_untrimed);
std::vector<int> dst_tz = framework::vectorize<int>(z_dims);
auto src_x_tz = framework::vectorize<int64_t>(x_dims);
auto src_y_tz = framework::vectorize<int64_t>(y_dims_untrimed);
auto dst_tz = framework::vectorize<int64_t>(z_dims);
std::vector<memory::primitive_desc> srcs_pd;
std::vector<float> scales = {1.0f, 1.0f};
const std::string key =
@@ -156,18 +156,17 @@ class EltwiseAddMKLDNNKernel : public framework::OpKernel<T> {
auto sum_pd = handler.AcquireSumPrimitiveDescriptor(
{src_x_memory, src_y_memory}, scales, dst_md);
T* z_data = z->mutable_data<T>(ctx.GetPlace(),
sum_pd->dst_primitive_desc().get_size());
T* z_data =
z->mutable_data<T>(ctx.GetPlace(), sum_pd->dst_desc().get_size());
auto dst_memory = handler.AcquireDstMemoryFromPrimitive(z_data);
std::vector<primitive::at> inputs({*src_x_memory, *src_y_memory});
auto sum_prim = handler.AcquireSum(dst_memory, &inputs);
auto sum_prim = handler.AcquireSum();
std::vector<primitive> pipeline;
pipeline.push_back(*sum_prim);
stream(stream::kind::eager).submit(pipeline).wait();
sum_prim->execute(astream, {{MKLDNN_ARG_MULTIPLE_SRC, *src_x_memory},
{MKLDNN_ARG_MULTIPLE_SRC + 1, *src_y_memory},
{MKLDNN_ARG_DST, *dst_memory}});
astream.wait();
z->set_layout(DataLayout::kMKLDNN);
z->set_format(platform::GetMKLDNNFormat(*dst_memory));
......
@@ -70,7 +70,7 @@ class ElementwiseMulMKLDNNKernel : public framework::OpKernel<T> {
auto x_dims = x->dims();
auto y_dims_untrimmed = y->dims();
auto x_int_dims = paddle::framework::vectorize<int>(x_dims);
auto x_int_dims = paddle::framework::vectorize<int64_t>(x_dims);
int pre, num, post, is_run_common_broadcast;
get_mid_dims(x_dims, y_dims_untrimmed, axis, &pre, &num, &post,
......
@@ -35,7 +35,7 @@ class MKLDNNActivationKernel
const auto *x = ctx.Input<Tensor>("X");
PADDLE_ENFORCE_EQ(x->layout(), DataLayout::kMKLDNN,
"Wrong layout set for X tensor");
PADDLE_ENFORCE_NE(x->format(), MKLDNNMemoryFormat::format_undef,
PADDLE_ENFORCE_NE(x->format(), MKLDNNMemoryFormat::undef,
"Wrong format set for X tensor");
Functor functor;
@@ -51,7 +51,7 @@ class MKLDNNActivationGradKernel
const auto *diff_y = ctx.Input<Tensor>(framework::GradVarName("Out"));
PADDLE_ENFORCE_EQ(diff_y->layout(), DataLayout::kMKLDNN,
"Wrong layout set for Input OutGrad tensor");
PADDLE_ENFORCE_NE(diff_y->format(), MKLDNNMemoryFormat::format_undef,
PADDLE_ENFORCE_NE(diff_y->format(), MKLDNNMemoryFormat::undef,
"Wrong format set for Input OutGrad tensor");
PADDLE_ENFORCE_EQ(
@@ -80,7 +80,7 @@ void eltwise_forward(const framework::ExecutionContext &ctx,
x->dims().size() == 2 || x->dims().size() == 3 || x->dims().size() == 4,
"Input dim must be with 2, 3 or 4");
auto src_tz = framework::vectorize<int>(x->dims());
auto src_tz = framework::vectorize<int64_t>(x->dims());
auto src_format = src_tz.size() == 2 ? MKLDNNMemoryFormat::nc : x->format();
@@ -92,13 +92,12 @@ void eltwise_forward(const framework::ExecutionContext &ctx,
auto src_memory_p = handler.AcquireSrcMemory(x);
auto dst_memory_p = handler.AcquireDstMemory(y);
auto activation_p =
handler.AcquireForwardPrimitive(*src_memory_p, *dst_memory_p);
auto activation_p = handler.AcquireForwardPrimitive();
// push primitive to stream and wait until it's executed
std::vector<primitive> pipeline;
pipeline.push_back(*activation_p);
stream(stream::kind::eager).submit(pipeline).wait();
mkldnn::stream astream(dev_ctx.GetEngine());
activation_p->execute(astream, {{MKLDNN_ARG_FROM, *src_memory_p},
{MKLDNN_ARG_TO, *dst_memory_p}});
astream.wait();
y->set_layout(DataLayout::kMKLDNN);
y->set_format(GetMKLDNNFormat(*dst_memory_p));
@@ -116,7 +115,7 @@ void eltwise_grad(const framework::ExecutionContext &ctx,
const T alpha = ctx.HasAttr("alpha") ? ctx.Attr<T>("alpha") : 0;
const T beta = ctx.HasAttr("beta") ? ctx.Attr<T>("beta") : 0;
auto diff_dst_tz = framework::vectorize<int>(diff_y->dims());
auto diff_dst_tz = framework::vectorize<int64_t>(diff_y->dims());
// diff_dst and src dims should be the same
auto src_format =
@@ -132,13 +131,14 @@ void eltwise_grad(const framework::ExecutionContext &ctx,
auto src_memory_p = handler.AcquireBackwardSrcMemory(x);
auto diff_dst_memory_p = handler.AcquireDiffDstMemory(diff_y);
auto diff_src_memory_p = handler.AcquireDiffSrcMemory(diff_x);
auto activation_backward_p = handler.AcquireBackwardPrimitive(
*src_memory_p, *diff_dst_memory_p, *diff_src_memory_p);
// push primitive to stream and wait until it's executed
std::vector<primitive> pipeline;
pipeline.push_back(*activation_backward_p);
stream(stream::kind::eager).submit(pipeline).wait();
auto activation_backward_p = handler.AcquireBackwardPrimitive();
mkldnn::stream astream(dev_ctx.GetEngine());
activation_backward_p->execute(astream,
{{MKLDNN_ARG_SRC, *src_memory_p},
{MKLDNN_ARG_DIFF_DST, *diff_dst_memory_p},
{MKLDNN_ARG_DIFF_SRC, *diff_src_memory_p}});
astream.wait();
diff_x->set_layout(DataLayout::kMKLDNN);
diff_x->set_format(GetMKLDNNFormat(*diff_src_memory_p));
......
@@ -31,9 +31,9 @@ class BatchNormMKLDNNHandler
: public platform::MKLDNNHandlerT<T, mkldnn::batch_normalization_forward,
mkldnn::batch_normalization_backward> {
public:
BatchNormMKLDNNHandler(const std::vector<int> &dims, const float &epsilon,
const unsigned &flags, const bool &global_stats,
const MKLDNNMemoryFormat fmt,
BatchNormMKLDNNHandler(const std::vector<int64_t> &dims, const float &epsilon,
const mkldnn::normalization_flags &flags,
const bool &global_stats, const MKLDNNMemoryFormat fmt,
const platform::MKLDNNDeviceContext &dev_ctx,
platform::Place cpu_place,
const std::string &uniq_name)
@@ -48,8 +48,8 @@ class BatchNormMKLDNNHandler
: mkldnn::prop_kind::forward_training,
md, epsilon, flags);
}
BatchNormMKLDNNHandler(const std::vector<int> &dims, const float &epsilon,
const unsigned &flags,
BatchNormMKLDNNHandler(const std::vector<int64_t> &dims, const float &epsilon,
const mkldnn::normalization_flags &flags,
const MKLDNNMemoryFormat diff_fmt,
const MKLDNNMemoryFormat src_fmt,
const platform::MKLDNNDeviceContext &dev_ctx,
@@ -70,47 +70,44 @@ class BatchNormMKLDNNHandler
std::shared_ptr<mkldnn::memory> AcquireScaleShiftMemory(T *scaleshift_data) {
return this->AcquireMemoryFromPrimitive(
this->fwd_pd_->weights_primitive_desc(), scaleshift_data,
"@scaleshift_mem_p");
this->fwd_pd_->weights_desc(), scaleshift_data, "@scaleshift_mem_p");
}
std::shared_ptr<mkldnn::memory> AcquireDiffScaleShiftMemory(
T *diff_scaleshift_data) {
return this->AcquireMemoryFromPrimitive(
this->bwd_pd_->diff_weights_primitive_desc(), diff_scaleshift_data,
"@diff_scaleshift_mem_p");
return this->AcquireMemoryFromPrimitive(this->bwd_pd_->diff_weights_desc(),
diff_scaleshift_data,
"@diff_scaleshift_mem_p");
}
std::shared_ptr<mkldnn::memory> AcquireMeanMemory(
const framework::Tensor *mean) {
const T *mean_data = mean->data<T>();
return this->AcquireMemoryFromPrimitive(
this->fwd_pd_->mean_primitive_desc(), to_void_cast<T>(mean_data),
"@mean_mem_p");
this->fwd_pd_->mean_desc(), to_void_cast<T>(mean_data), "@mean_mem_p");
}
std::shared_ptr<mkldnn::memory> AcquireMeanMemory(framework::Tensor *mean) {
T *mean_data = mean->mutable_data<T>(
this->place_, this->fwd_pd_->mean_primitive_desc().get_size());
return this->AcquireMemoryFromPrimitive(
this->fwd_pd_->mean_primitive_desc(), mean_data, "@mean_mem_p");
T *mean_data = mean->mutable_data<T>(this->place_,
this->fwd_pd_->mean_desc().get_size());
return this->AcquireMemoryFromPrimitive(this->fwd_pd_->mean_desc(),
mean_data, "@mean_mem_p");
}
std::shared_ptr<mkldnn::memory> AcquireVarianceMemory(
const framework::Tensor *variance) {
const T *variance_data = variance->data<T>();
return this->AcquireMemoryFromPrimitive(
this->fwd_pd_->variance_primitive_desc(),
to_void_cast<T>(variance_data), "@variance_mem_p");
return this->AcquireMemoryFromPrimitive(this->fwd_pd_->variance_desc(),
to_void_cast<T>(variance_data),
"@variance_mem_p");
}
std::shared_ptr<mkldnn::memory> AcquireVarianceMemory(
framework::Tensor *variance) {
T *variance_data = variance->mutable_data<T>(
this->place_, this->fwd_pd_->variance_primitive_desc().get_size());
return this->AcquireMemoryFromPrimitive(
this->fwd_pd_->variance_primitive_desc(), variance_data,
"@variance_mem_p");
this->place_, this->fwd_pd_->variance_desc().get_size());
return this->AcquireMemoryFromPrimitive(this->fwd_pd_->variance_desc(),
variance_data, "@variance_mem_p");
}
};
@@ -140,11 +137,11 @@ class BatchNormMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
PADDLE_ENFORCE_EQ(x->layout(), DataLayout::kMKLDNN,
"Wrong layout set for X tensor");
PADDLE_ENFORCE_NE(x->format(), MKLDNNMemoryFormat::format_undef,
PADDLE_ENFORCE_NE(x->format(), MKLDNNMemoryFormat::undef,
"Wrong format set for X tensor");
auto src_tz = paddle::framework::vectorize<int>(x->dims());
auto scale_tz = paddle::framework::vectorize<int>(scale->dims());
auto src_tz = paddle::framework::vectorize<int64_t>(x->dims());
auto scale_tz = paddle::framework::vectorize<int64_t>(scale->dims());
PADDLE_ENFORCE(scale_tz.size() == 1, "Dims of scale tensor is NOT 1");
const unsigned int C = scale_tz[0];
@@ -156,9 +153,11 @@ class BatchNormMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
shift->data<T>() + C);
// Flags are added by bitwise OR operation
unsigned flags = mkldnn::use_scale_shift; // 001
if (global_stats) flags |= mkldnn::use_global_stats; // 010
if (fuse_with_relu && is_test) flags |= mkldnn::fuse_bn_relu; // 100
auto flags = mkldnn::normalization_flags::use_scale_shift; // 001
if (global_stats)
flags |= mkldnn::normalization_flags::use_global_stats; // 010
if (fuse_with_relu && is_test)
flags |= mkldnn::normalization_flags::fuse_norm_relu; // 100
BatchNormMKLDNNHandler<T> handler(
src_tz, epsilon, flags, global_stats,
@@ -170,38 +169,35 @@ class BatchNormMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
handler.AcquireScaleShiftMemory(scaleshift_data.data());
auto dst_memory = handler.AcquireDstMemory(y);
std::shared_ptr<mkldnn::batch_normalization_forward> batch_norm_p;
auto batch_norm_p = handler.AcquireForwardPrimitive();
std::shared_ptr<memory> mean_memory;
std::shared_ptr<memory> variance_memory;
if (global_stats) {
// mean and variance are taken from input Tensor
const auto *mean = ctx.Input<Tensor>("Mean");
const auto *variance = ctx.Input<Tensor>("Variance");
std::shared_ptr<memory> mean_memory = handler.AcquireMeanMemory(mean);
std::shared_ptr<memory> variance_memory =
handler.AcquireVarianceMemory(variance);
batch_norm_p = handler.AcquireForwardPrimitive(
*src_memory, (const mkldnn::primitive::at &)*mean_memory,
(const mkldnn::primitive::at &)*variance_memory, *scaleshift_memory,
*dst_memory);
mean_memory = handler.AcquireMeanMemory(mean);
variance_memory = handler.AcquireVarianceMemory(variance);
} else {
// mean and variance are calculated and saved in output Tensor
std::shared_ptr<memory> mean_memory =
handler.AcquireMeanMemory(batch_mean);
std::shared_ptr<memory> variance_memory =
handler.AcquireVarianceMemory(batch_variance);
batch_norm_p = handler.AcquireForwardPrimitive(
*src_memory, *scaleshift_memory, *dst_memory, *mean_memory,
*variance_memory);
mean_memory = handler.AcquireMeanMemory(batch_mean);
variance_memory = handler.AcquireVarianceMemory(batch_variance);
}
y->set_layout(DataLayout::kMKLDNN);
y->set_format(platform::GetMKLDNNFormat(*dst_memory));
std::vector<mkldnn::primitive> pipeline;
pipeline.push_back(*batch_norm_p);
mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait();
mkldnn::stream astream(dev_ctx.GetEngine());
batch_norm_p->execute(astream,
{{MKLDNN_ARG_SRC, *src_memory},
{MKLDNN_ARG_SCALE_SHIFT, *scaleshift_memory},
{MKLDNN_ARG_MEAN, *mean_memory},
{MKLDNN_ARG_VARIANCE, *variance_memory},
{MKLDNN_ARG_DST, *dst_memory}});
astream.wait();
if (!global_stats) {
// mkldnn only compute stats for current batch
@@ -245,11 +241,11 @@ class BatchNormMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
PADDLE_ENFORCE_EQ(diff_y->layout(), DataLayout::kMKLDNN,
"Wrong layout set for Input diff_y tensor");
PADDLE_ENFORCE_NE(diff_y->format(), MKLDNNMemoryFormat::format_undef,
PADDLE_ENFORCE_NE(diff_y->format(), MKLDNNMemoryFormat::undef,
"Wrong format set for Input diff_y tensor");
auto src_tz = paddle::framework::vectorize<int>(x->dims());
auto scale_tz = paddle::framework::vectorize<int>(scale->dims());
auto src_tz = paddle::framework::vectorize<int64_t>(x->dims());
auto scale_tz = paddle::framework::vectorize<int64_t>(scale->dims());
PADDLE_ENFORCE(scale_tz.size() == 1, "Dims of scale tensor is NOT 1");
const unsigned int C = scale_tz[0];
@@ -261,8 +257,9 @@ class BatchNormMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
platform::MKLDNNFormatForSize(src_tz.size(), x->format());
BatchNormMKLDNNHandler<T> handler(
src_tz, epsilon, mkldnn::use_scale_shift, dst_format, input_format,
dev_ctx, ctx.GetPlace(), ctx.InputName("SavedMean"));
src_tz, epsilon, mkldnn::normalization_flags::use_scale_shift,
dst_format, input_format, dev_ctx, ctx.GetPlace(),
ctx.InputName("SavedMean"));
// MKLDNN requires a single piece of memory for scale and shift/bias data
const size_t scaleshift_size = 2 * C;
@@ -285,13 +282,18 @@ class BatchNormMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
handler.AcquireDiffScaleShiftMemory(diff_scaleshift_data.data());
// finally create batch_norm backward primitive
auto batch_norm_bwd_p = handler.AcquireBackwardPrimitive(
*src_memory, *mean_memory, *variance_memory, *diff_dst_memory,
*scaleshift_memory, *diff_src_memory, *diff_scaleshift_memory);
std::vector<primitive> pipeline;
pipeline.push_back(*batch_norm_bwd_p);
stream(stream::kind::eager).submit(pipeline).wait();
auto batch_norm_bwd_p = handler.AcquireBackwardPrimitive();
mkldnn::stream astream(dev_ctx.GetEngine());
batch_norm_bwd_p->execute(
astream, {{MKLDNN_ARG_SRC, *src_memory},
{MKLDNN_ARG_MEAN, *mean_memory},
{MKLDNN_ARG_VARIANCE, *variance_memory},
{MKLDNN_ARG_DIFF_DST, *diff_dst_memory},
{MKLDNN_ARG_SCALE_SHIFT, *scaleshift_memory},
{MKLDNN_ARG_DIFF_SRC, *diff_src_memory},
{MKLDNN_ARG_DIFF_SCALE_SHIFT, *diff_scaleshift_memory}});
astream.wait();
T *diff_scale_data = diff_scale->mutable_data<T>(ctx.GetPlace());
T *diff_shift_data = diff_shift->mutable_data<T>(ctx.GetPlace());
......
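
One more API shift is visible in the batch-norm hunks above: the flags argument is no longer a plain unsigned bitmask but the scoped enum mkldnn::normalization_flags, for which mkldnn.hpp overloads the bitwise operators, so OR-composition still works. A compressed sketch of the flag selection (a hypothetical helper mirroring the kernel logic above):

#include <mkldnn.hpp>

// Sketch only: scoped-enum flags with the overloaded operator|= (MKL-DNN 1.0).
mkldnn::normalization_flags SelectBatchNormFlags(bool global_stats,
                                                 bool fuse_with_relu,
                                                 bool is_test) {
  auto flags = mkldnn::normalization_flags::use_scale_shift;   // 001
  if (global_stats)
    flags |= mkldnn::normalization_flags::use_global_stats;    // 010
  if (fuse_with_relu && is_test)
    flags |= mkldnn::normalization_flags::fuse_norm_relu;      // 100
  return flags;
}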
@@ -32,19 +32,17 @@ static void EnforceLayouts(const std::vector<const Tensor*> inputs) {
for (auto* input : inputs) {
PADDLE_ENFORCE_EQ(input->layout(), DataLayout::kMKLDNN,
"Wrong layout set for Input tensor");
PADDLE_ENFORCE_NE(input->format(), MKLDNNMemoryFormat::format_undef,
PADDLE_ENFORCE_NE(input->format(), MKLDNNMemoryFormat::undef,
"Wrong format set for Input tensor");
}
}
static memory::primitive_desc CreateMemPrimDesc(const Tensor& input,
const mkldnn::engine& engine,
const memory::data_type& dt) {
const auto dims = paddle::framework::vectorize<int>(input.dims());
static memory::desc CreateMemDesc(const Tensor& input,
const memory::data_type& dt) {
const auto dims = paddle::framework::vectorize<int64_t>(input.dims());
const auto format = input.format();
auto description = memory::desc(dims, dt, format);
auto mem_prim_desc = memory::primitive_desc(description, engine);
return mem_prim_desc;
auto mem_desc = memory::desc(dims, dt, format);
return mem_desc;
}
static platform::CPUPlace GetCpuPlace(
@@ -70,14 +68,15 @@ class ConcatPrimitiveFactory {
const memory::data_type& dt = memory::data_type::f32) {
CreateSourcesDescriptors(multi_input, mkldnn_engine, dt);
auto dst_desc = CreateDstMemDescriptor(output, dt);
return concat::primitive_desc(dst_desc, concat_axis, srcs_pd);
return concat::primitive_desc(dst_desc, concat_axis, srcs_d, mkldnn_engine);
}
concat CreateConcatPrimitive(const concat::primitive_desc& concat_pd,
Tensor* output, platform::CPUPlace place) {
CreateSourcePrimitiveAts();
dst_mem = CreateDstMemory(concat_pd, output, place);
return concat(concat_pd, inputs, dst_mem.get());
Tensor* output, platform::CPUPlace place,
const mkldnn::engine& mkldnn_engine) {
dst_mem = mkldnn::memory(concat_pd.dst_desc(), mkldnn_engine,
output->mutable_data<T>(place));
return concat(concat_pd);
}
void SetSrcDataHandleByIndex(const std::vector<memory>& srcs, const size_t& i,
@@ -96,41 +95,25 @@ class ConcatPrimitiveFactory {
private:
memory::desc CreateDstMemDescriptor(Tensor* output,
const memory::data_type& dt) {
auto dst_dims = paddle::framework::vectorize<int>(output->dims());
auto dst_dims = paddle::framework::vectorize<int64_t>(output->dims());
return memory::desc(dst_dims, dt, MKLDNNMemoryFormat::any);
}
mkldnn::memory CreateDstMemory(const concat::primitive_desc& concat_pd,
Tensor* output,
const platform::CPUPlace& place) {
return memory(concat_pd.dst_primitive_desc(),
output->mutable_data<T>(place));
}
void CreateSourcesDescriptors(const std::vector<const Tensor*> multi_input,
const mkldnn::engine& mkldnn_engine,
const memory::data_type& dt) {
for (size_t i = 0; i < multi_input.size(); i++) {
auto mem_prim_desc =
CreateMemPrimDesc(*multi_input[i], mkldnn_engine, dt);
srcs_pd.push_back(mem_prim_desc);
srcs.push_back(
memory(mem_prim_desc, to_void_cast(multi_input[i]->data<T>())));
}
}
void CreateSourcePrimitiveAts() {
inputs.reserve(srcs.size());
for (size_t i = 0; i < srcs.size(); i++) {
inputs.push_back(srcs[i]);
auto mem_desc = CreateMemDesc(*multi_input[i], dt);
srcs_d.push_back(mem_desc);
srcs.push_back(memory(mem_desc, mkldnn_engine,
to_void_cast(multi_input[i]->data<T>())));
}
}
private:
std::vector<memory::primitive_desc> srcs_pd;
std::vector<memory> srcs;
std::vector<primitive::at> inputs;
boost::optional<memory> dst_mem;
std::vector<memory::desc> srcs_d;
std::vector<mkldnn::memory> srcs;
boost::optional<mkldnn::memory> dst_mem;
};
template <typename T>
@@ -140,7 +123,7 @@ class ConcatMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
auto multi_input = ctx.MultiInput<Tensor>("X");
EnforceLayouts(multi_input);
Tensor* output = ctx.Output<Tensor>("Out");
int64_t concat_axis = static_cast<int64_t>(ctx.Attr<int>("axis"));
int concat_axis = ctx.Attr<int>("axis");
auto& dev_ctx =
ctx.template device_context<paddle::platform::MKLDNNDeviceContext>();
auto place = GetCpuPlace(ctx);
@@ -152,6 +135,7 @@ class ConcatMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
std::string key = platform::CreateKey(
paddle::framework::vectorize<int>(multi_input[0]->dims()),
ctx.OutputName("Out"), dt, platform::ThreadIDasStr());
const std::string key_prim = key + "@concat_p";
const std::string key_concat_pd = key + "@concat_pd";
const std::string key_srcs = key + "@concat_srcs";
@@ -162,14 +146,13 @@ class ConcatMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
std::shared_ptr<memory> dst_mem;
auto concat_p = std::static_pointer_cast<concat>(dev_ctx.GetBlob(key_prim));
const auto& mkldnn_engine = dev_ctx.GetEngine();
if (concat_p == nullptr) {
const auto& mkldnn_engine = dev_ctx.GetEngine();
concat_pd = std::make_shared<concat::primitive_desc>(
prim_creator.CreateConcatPrimDescriptor(multi_input, output,
static_cast<int>(concat_axis),
mkldnn_engine, dt));
concat_p = std::make_shared<concat>(
prim_creator.CreateConcatPrimitive(*concat_pd, output, place));
prim_creator.CreateConcatPrimDescriptor(
multi_input, output, concat_axis, mkldnn_engine, dt));
concat_p = std::make_shared<concat>(prim_creator.CreateConcatPrimitive(
*concat_pd, output, place, mkldnn_engine));
srcs = std::make_shared<std::vector<memory>>(prim_creator.GetSrcs());
dst_mem = std::make_shared<memory>(prim_creator.GetDst());
dev_ctx.SetBlob(key_prim, concat_p);
@@ -189,7 +172,15 @@ class ConcatMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
prim_creator.SetDstDataHandle(*dst_mem, output->mutable_data<T>(place));
}
stream(stream::kind::eager).submit({*concat_p}).wait();
mkldnn::stream astream(mkldnn_engine);
std::unordered_map<int, memory> args;
for (size_t i = 0; i < multi_input.size(); ++i) {
args.insert({MKLDNN_ARG_MULTIPLE_SRC + i, (*srcs).at(i)});
}
args.insert({MKLDNN_ARG_DST, *dst_mem});
concat_p->execute(astream, args);
astream.wait();
output->set_layout(DataLayout::kMKLDNN);
output->set_format(platform::GetMKLDNNFormat(*dst_mem));
......
@@ -48,12 +48,12 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
PADDLE_ENFORCE_EQ(input->layout(), DataLayout::kMKLDNN,
"Wrong layout set for Input tensor");
PADDLE_ENFORCE_NE(input->format(), MKLDNNMemoryFormat::format_undef,
PADDLE_ENFORCE_NE(input->format(), MKLDNNMemoryFormat::undef,
"Wrong format set for Input tensor");
PADDLE_ENFORCE_EQ(filter->layout(), DataLayout::kMKLDNN,
"Wrong layout set for Filter tensor");
PADDLE_ENFORCE_NE(filter->format(), MKLDNNMemoryFormat::format_undef,
PADDLE_ENFORCE_NE(filter->format(), MKLDNNMemoryFormat::undef,
"Wrong format set for Filter tensor");
PADDLE_ENFORCE_EQ(input->dims().size(), 4,
@@ -64,16 +64,22 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
if (bias) {
PADDLE_ENFORCE_EQ(bias->layout(), DataLayout::kMKLDNN,
"Wrong layout set for Bias tensor");
PADDLE_ENFORCE_NE(bias->format(), MKLDNNMemoryFormat::format_undef,
PADDLE_ENFORCE_NE(bias->format(), MKLDNNMemoryFormat::undef,
"Wrong format set for Bias tensor");
PADDLE_ENFORCE_EQ(bias->dims().size(), 1,
"Bias must only have 1 dimension, i.e. X");
}
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
std::vector<int> strides_temp = ctx.Attr<std::vector<int>>("strides");
std::vector<int64_t> strides(begin(strides_temp), end(strides_temp));
std::vector<int> paddings_temp = ctx.Attr<std::vector<int>>("paddings");
std::vector<int64_t> paddings(begin(paddings_temp), end(paddings_temp));
std::vector<int> dilations_temp = ctx.Attr<std::vector<int>>("dilations");
std::vector<int64_t> dilations(begin(dilations_temp), end(dilations_temp));
int groups = ctx.Attr<int>("groups");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
@@ -83,7 +89,7 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
auto filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
auto ksize = framework::vectorize<int>(filter_data_dims);
auto ksize = framework::vectorize(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
data_dims, strides, ksize);
@@ -95,8 +101,9 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
const T* input_data = input->data<T>();
const T* filter_data = filter->data<T>();
auto src_tz = paddle::framework::vectorize<int>(input->dims());
auto iohw_weights_tz = paddle::framework::vectorize<int>(filter->dims());
auto src_tz = paddle::framework::vectorize<int64_t>(input->dims());
auto iohw_weights_tz =
paddle::framework::vectorize<int64_t>(filter->dims());
auto weights_tz = iohw_weights_tz;
// IOHW -> OIHW
@@ -137,7 +144,7 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
weights_tz[3] = h;
weights_tz[4] = w;
}
auto dst_tz = paddle::framework::vectorize<int>(output->dims());
auto dst_tz = paddle::framework::vectorize<int64_t>(output->dims());
// Get unique name for storing MKLDNN primitives
@@ -165,7 +172,7 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
src_tz, platform::MKLDNNGetDataType<T>(), chosen_memory_format);
auto weights_md = platform::MKLDNNMemDesc(
weights_tz, platform::MKLDNNGetDataType<T>(), chosen_memory_format);
std::vector<int> bias_tz;
std::vector<int64_t> bias_tz;
auto dst_md = platform::MKLDNNMemDesc(
dst_tz, platform::MKLDNNGetDataType<T>(), chosen_memory_format);
@@ -177,7 +184,7 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
auto fwd_prop_kind = is_test ? mkldnn::prop_kind::forward_inference
: mkldnn::prop_kind::forward_training;
if (bias) {
bias_tz = paddle::framework::vectorize<int>(bias->dims());
bias_tz = paddle::framework::vectorize<int64_t>(bias->dims());
auto bias_md = platform::MKLDNNMemDesc(
bias_tz, platform::MKLDNNGetDataType<T>(), MKLDNNMemoryFormat::x);
conv_transpose_pd = handler.AcquireConvolutionPrimitiveDescriptor(
@@ -203,15 +210,14 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
auto weights_memory_p = handler.AcquireWeightsMemoryFromPrimitive(
user_weights_memory_p, pipeline, is_test);
std::shared_ptr<mkldnn::memory> dst_memory_p;
auto output_data =
output->mutable_data<T>(ctx.GetPlace(), handler.GetDstMemorySize());
dst_memory_p = handler.AcquireDstMemoryFromPrimitive(
auto dst_memory_p = handler.AcquireDstMemoryFromPrimitive(
platform::to_void_cast<T>(output_data));
// create convolution op primitive
std::shared_ptr<mkldnn::deconvolution_forward> conv_p;
auto conv_p = handler.AcquireConvolution();
mkldnn::stream astream(mkldnn_engine);
if (bias) {
const T* bias_data = bias->data<T>();
auto user_bias_md = platform::MKLDNNMemDesc(
@@ -221,16 +227,17 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
auto bias_memory_p =
handler.AcquireBiasMemoryFromPrimitive(user_bias_memory_p, pipeline);
conv_p = handler.AcquireConvolution(src_memory_p, weights_memory_p,
bias_memory_p, dst_memory_p);
conv_p->execute(astream, {{MKLDNN_ARG_SRC, *src_memory_p},
{MKLDNN_ARG_WEIGHTS, *weights_memory_p},
{MKLDNN_ARG_BIAS, *bias_memory_p},
{MKLDNN_ARG_DST, *dst_memory_p}});
} else {
conv_p = handler.AcquireConvolution(src_memory_p, weights_memory_p,
dst_memory_p);
conv_p->execute(astream, {{MKLDNN_ARG_SRC, *src_memory_p},
{MKLDNN_ARG_WEIGHTS, *weights_memory_p},
{MKLDNN_ARG_DST, *dst_memory_p}});
}
// push primitive to stream and wait until it's executed
pipeline.push_back(*conv_p);
mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait();
astream.wait();
output->set_layout(DataLayout::kMKLDNN);
output->set_format(platform::GetMKLDNNFormat(*dst_memory_p));
......
@@ -46,9 +46,8 @@ class DeQuantOpKernel : public framework::OpKernel<T> {
float* output_data = output->mutable_data<float>(ctx.GetPlace());
std::vector<float> reorder_scale = {1.0f / scale_data};
std::vector<primitive> pipeline;
auto src_tz = paddle::framework::vectorize<int>(input->dims());
auto dst_tz = paddle::framework::vectorize<int>(output->dims());
auto src_tz = paddle::framework::vectorize<int64_t>(input->dims());
auto dst_tz = paddle::framework::vectorize<int64_t>(output->dims());
mkldnn::memory::data_type src_dt =
paddle::framework::ToMKLDNNDataType(input->type());
MKLDNNMemoryFormat src_fmt = input->format();
@@ -69,23 +68,20 @@ class DeQuantOpKernel : public framework::OpKernel<T> {
attri.set_output_scales(mask, reorder_scale);
auto src_md = platform::MKLDNNMemDesc({src_tz}, src_dt, src_fmt);
auto src_pd = mkldnn::memory::primitive_desc(src_md, engine);
src_memory =
std::make_shared<mkldnn::memory>(src_pd, to_void_cast<T>(input_data));
std::shared_ptr<primitive::at> src_memory_p =
std::shared_ptr<primitive::at>(new primitive::at(*src_memory));
auto dst_md = platform::MKLDNNMemDesc(
{dst_tz}, memory::data_type::f32,
platform::MKLDNNFormatForSize(dst_tz.size(), memory::format::nchw));
auto dst_pd = mkldnn::memory::primitive_desc(dst_md, engine);
src_memory = std::make_shared<mkldnn::memory>(
src_md, engine, to_void_cast<T>(input_data));
auto dst_md =
platform::MKLDNNMemDesc({dst_tz}, memory::data_type::f32,
platform::MKLDNNFormatForSize(
dst_tz.size(), MKLDNNMemoryFormat::nchw));
dst_memory = std::make_shared<mkldnn::memory>(
dst_pd, to_void_cast<float>(output_data));
dst_md, engine, to_void_cast<float>(output_data));
auto reorder_pd = std::shared_ptr<reorder::primitive_desc>(
new reorder::primitive_desc(src_pd, dst_pd, attri));
reorder_p = std::shared_ptr<reorder>(
new reorder(*reorder_pd, *src_memory_p, *dst_memory));
new reorder::primitive_desc(*src_memory, *dst_memory, attri));
reorder_p = std::shared_ptr<reorder>(new reorder(*reorder_pd));
dev_ctx.SetBlob(key_prim, reorder_p);
dev_ctx.SetBlob(key_src_mem, src_memory);
dev_ctx.SetBlob(key_dst_mem, dst_memory);
@@ -99,8 +95,9 @@ class DeQuantOpKernel : public framework::OpKernel<T> {
dst_memory->set_data_handle(output->mutable_data<float>(ctx.GetPlace()));
}
pipeline.push_back(*reorder_p);
stream(stream::kind::eager).submit(pipeline).wait();
mkldnn::stream astream(engine);
reorder_p->execute(astream, *src_memory, *dst_memory);
astream.wait();
output->set_layout(DataLayout::kMKLDNN);
output->set_format(GetMKLDNNFormat(*dst_memory));
......
@@ -42,16 +42,16 @@ class FCPrimitiveFactory {
public:
explicit FCPrimitiveFactory(const mkldnn::engine& engine) : engine_(engine) {}
inner_product_forward CreateFcPrimitive(const LoDTensor* input,
const Tensor* weights,
const Tensor* bias, LoDTensor* output,
const ExecutionContext& ctx) {
void ExecuteFcPrimitive(const LoDTensor* input, const Tensor* weights,
const Tensor* bias, LoDTensor* output,
const ExecutionContext& ctx) {
RecomputeOutputDims(ctx, input, weights, output);
// If primitive has already been created and cached, don't create new one,
// but update input and output data pointers and return it.
if (fc_) {
UpdateDataPointers(ctx, output, input);
return *fc_;
this->Execute();
return;
}
auto src_desc = CreateMemDescriptor<T_in>(input, input->format());
input_ = CreateMemory<T_in>(src_desc, input);
@@ -72,7 +72,22 @@ class FCPrimitiveFactory {
auto dst_desc = CreateMemDescriptor<T_out>(output, MKLDNNMemoryFormat::any);
fc_ = CreateFcPrimitive(*input_, *weights_, dst_desc, bias, output, ctx);
return *fc_;
this->Execute();
}
void Execute() {
mkldnn::stream astream(engine_);
if (bias_) {
fc_->execute(astream, {{MKLDNN_ARG_SRC, *input_},
{MKLDNN_ARG_WEIGHTS, *weights_},
{MKLDNN_ARG_BIAS, *bias_},
{MKLDNN_ARG_DST, *output_}});
} else {
fc_->execute(astream, {{MKLDNN_ARG_SRC, *input_},
{MKLDNN_ARG_WEIGHTS, *weights_},
{MKLDNN_ARG_DST, *output_}});
}
astream.wait();
}
private:
@@ -83,7 +98,7 @@ class FCPrimitiveFactory {
// If the primitive exists, but the output tensor has changed its
// variable, update its format to what has been determined in first
// call to CreateFcPrimitive method.
if (out->format() == MKLDNNMemoryFormat::format_undef) {
if (out->format() == MKLDNNMemoryFormat::undef) {
auto output_format = platform::GetMKLDNNFormat(*output_);
out->set_format((MKLDNNMemoryFormat)output_format);
}
@@ -94,36 +109,37 @@ class FCPrimitiveFactory {
using format = MKLDNNMemoryFormat;
switch (fmt) {
case format::nChw16c:
return format::oIhw16i;
return format::aBcd16b;
case format::nChw8c:
return format::oIhw8i;
return format::aBcd8b;
case format::nchw:
return format::oihw;
case format::nhwc:
return format::hwio;
default:
return format::format_undef;
return format::undef;
}
}
// Convert data from one data format to another
mkldnn::memory Reorder(const memory::desc& src_desc,
const memory::desc& dst_desc, const void* src_data) {
auto src_mem = memory({src_desc, engine_}, const_cast<void*>(src_data));
auto dst_mem = memory({dst_desc, engine_});
const memory::desc& dst_desc, void* src_data) {
auto src_mem = memory(src_desc, engine_, src_data);
auto dst_mem = memory(dst_desc, engine_);
auto reorder = mkldnn::reorder(src_mem, dst_mem);
stream(stream::kind::eager).submit({reorder}).wait();
mkldnn::stream astream(engine_);
reorder.execute(astream, src_mem, dst_mem);
astream.wait();
return dst_mem;
}
// Convert data from one data format to another and rescale it.
// If the desired data type is (un)signed int8, quantization occurs here.
mkldnn::memory Reorder(const memory& src_mem,
const memory::primitive_desc& dst_pd,
mkldnn::memory Reorder(const memory& src_mem, const memory::desc& dst_md,
const std::vector<float>& scale_data) {
mkldnn::memory dst_mem = mkldnn::memory(dst_pd);
mkldnn::memory dst_mem = mkldnn::memory(dst_md, engine_);
mkldnn::primitive_attr attributes;
// According to MKL-DNN's documentation, mask determines along which
// dimensions the scale should be applied.
@@ -133,19 +149,19 @@ class FCPrimitiveFactory {
// because we perform per-output-channel quantization
int mask = CreateMask(0, scale_data.size() > 1);
attributes.set_output_scales(mask, scale_data);
auto reorder =
mkldnn::reorder(mkldnn::reorder::primitive_desc(
src_mem.get_primitive_desc(), dst_pd, attributes),
src_mem, dst_mem);
auto reorder = mkldnn::reorder({src_mem, dst_mem, attributes});
stream(stream::kind::eager).submit({reorder}).wait();
mkldnn::stream astream(engine_);
reorder.execute(astream,
{{MKLDNN_ARG_FROM, src_mem}, {MKLDNN_ARG_TO, dst_mem}});
astream.wait();
return dst_mem;
}
template <typename T>
static mkldnn::memory::desc CreateMemDescriptor(const std::vector<int>& dims,
MKLDNNMemoryFormat format) {
static mkldnn::memory::desc CreateMemDescriptor(
const std::vector<int64_t>& dims, MKLDNNMemoryFormat format) {
return platform::MKLDNNMemDesc(dims, platform::MKLDNNGetDataType<T>(),
format);
}
@@ -153,28 +169,28 @@ class FCPrimitiveFactory {
template <typename T>
static mkldnn::memory::desc CreateMemDescriptor(const Tensor* tensor,
MKLDNNMemoryFormat format) {
auto dims = framework::vectorize<int>(tensor->dims());
auto dims = framework::vectorize(tensor->dims());
return CreateMemDescriptor<T>(dims, format);
}
template <typename T>
mkldnn::memory CreateMemory(const mkldnn::memory::desc& desc,
const Tensor* tensor) {
return CreateMemory(desc, tensor->data<T>());
return CreateMemory(desc, platform::to_void_cast<T>(tensor->data<T>()));
}
mkldnn::memory CreateMemory(const mkldnn::memory::desc& desc,
const void* data) {
return memory({desc, engine_}, const_cast<void*>(data));
mkldnn::memory CreateMemory(const mkldnn::memory::desc& desc, void* data) {
return memory(desc, engine_, data);
}
// Transpose weights through MKL-DNN's reorder from io to oi format.
mkldnn::memory TransposeWeights(const Tensor* weights) {
auto dims = framework::vectorize<int>(weights->dims());
auto dims = framework::vectorize(weights->dims());
std::swap(dims[0], dims[1]); // Correct output dimensions
auto src_desc = CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::io);
auto dst_desc = CreateMemDescriptor<float>(dims, MKLDNNMemoryFormat::oi);
return Reorder(src_desc, dst_desc, weights->data<float>());
return Reorder(src_desc, dst_desc,
platform::to_void_cast<float>(weights->data<float>()));
}
// Compute the bias scales so that its values correspond to the
@@ -232,17 +248,17 @@ class FCPrimitiveFactory {
}
void QuantizeWeights(const ExecutionContext& ctx) {
auto quantized_desc = weights_->get_primitive_desc().desc();
auto quantized_desc = weights_->get_desc();
quantized_desc.data.data_type =
(mkldnn_data_type_t)platform::MKLDNNGetDataType<T_w>();
weights_ = Reorder(*weights_, {quantized_desc, engine_},
weights_ = Reorder(*weights_, quantized_desc,
ctx.Attr<std::vector<float>>("Scale_weights"));
}
void QuantizeBias(const inner_product_forward::primitive_desc& fc_prim_desc,
const ExecutionContext& ctx) {
auto bias_scales = ComputeBiasScales(ctx);
bias_ = Reorder(*bias_, fc_prim_desc.bias_primitive_desc(), bias_scales);
bias_ = Reorder(*bias_, fc_prim_desc.bias_desc(), bias_scales);
}
// Fuse relu into FC with activation type attribute has been set to 'relu'
@@ -273,8 +289,8 @@ class FCPrimitiveFactory {
const ExecutionContext& ctx) {
// Acquire descriptors needed for creation of inner_product primitive
// descriptor
const auto weights_desc = weights_memory.get_primitive_desc().desc();
const auto src_desc = src_memory.get_primitive_desc().desc();
const auto weights_desc = weights_memory.get_desc();
const auto src_desc = src_memory.get_desc();
// Based on provided attributes, create attributes used by MKL-DNN to
// enable fused post-op activations such as 'relu'
const auto attrs = CreatePostOps(ctx);
@@ -294,15 +310,12 @@ class FCPrimitiveFactory {
output_ = CreateDstMemory(fc_prim_desc, ctx, output);
// Return MKL-DNN primitive ready to be fed into pipeline and executed
return inner_product_forward(fc_prim_desc, src_memory, weights_memory,
*bias_, *output_);
return inner_product_forward(fc_prim_desc);
} else {
auto fc_prim_desc =
CreateFcPrimDesc(src_desc, weights_desc, dst_desc, attrs);
output_ = CreateDstMemory(fc_prim_desc, ctx, output);
return inner_product_forward(fc_prim_desc, src_memory, weights_memory,
*output_);
return inner_product_forward(fc_prim_desc);
}
}
@@ -345,8 +358,8 @@ class FCPrimitiveFactory {
// perform a conversion.
mkldnn::memory CreateFourDimWeightsMemory(const Tensor* input,
const Tensor* weights) {
auto input_dims = framework::vectorize<int>(input->dims());
auto weight_dims = framework::vectorize<int>(weights->dims());
auto input_dims = framework::vectorize(input->dims());
auto weight_dims = framework::vectorize(weights->dims());
auto dims = {weight_dims[1], input_dims[1], input_dims[2], input_dims[3]};
auto dst_format = MatchWeightFormat(input->format());
@@ -361,11 +374,11 @@ class FCPrimitiveFactory {
mkldnn::memory CreateDstMemory(
const mkldnn::inner_product_forward::primitive_desc& fc_prim_desc,
const ExecutionContext& ctx, Tensor* output) {
auto dst_prim_desc = fc_prim_desc.dst_primitive_desc();
auto buffer_size = dst_prim_desc.get_size();
auto dst_desc = fc_prim_desc.dst_desc();
auto buffer_size = dst_desc.get_size();
T_out* output_data =
output->mutable_data<T_out>(ctx.GetPlace(), buffer_size);
memory dst_mem(dst_prim_desc, to_void_cast<T_out>(output_data));
memory dst_mem(dst_desc, engine_, to_void_cast<T_out>(output_data));
output->set_format(platform::GetMKLDNNFormat(dst_mem));
return dst_mem;
}
@@ -421,25 +434,24 @@ GetPrimitiveFactory(const MKLDNNDeviceContext& dev_ctx,
// Choose appropriate primitive factory implementation based on inferred
// output type (uint8, int8 or float).
template <typename T_in, typename T_w>
static inner_product_forward GetFcPrimitive(
const MKLDNNDeviceContext& dev_ctx, const ExecutionContext& ctx,
const LoDTensor* input, const Tensor* w, const Tensor* bias,
LoDTensor* output, const mkldnn::engine& mkldnn_engine, bool fuse_relu,
bool force_fp32_output) {
static void ExecuteFc(const MKLDNNDeviceContext& dev_ctx,
const ExecutionContext& ctx, const LoDTensor* input,
const Tensor* w, const Tensor* bias, LoDTensor* output,
const mkldnn::engine& mkldnn_engine, bool fuse_relu,
bool force_fp32_output) {
constexpr bool is_int8 =
std::is_same<T_in, int8_t>::value || std::is_same<T_in, uint8_t>::value;
if (!is_int8 || force_fp32_output) {
return GetPrimitiveFactory<T_in, T_w, float>(dev_ctx, ctx, input, w,
mkldnn_engine)
->CreateFcPrimitive(input, w, bias, output, ctx);
GetPrimitiveFactory<T_in, T_w, float>(dev_ctx, ctx, input, w, mkldnn_engine)
->ExecuteFcPrimitive(input, w, bias, output, ctx);
} else if (fuse_relu) {
return GetPrimitiveFactory<T_in, T_w, uint8_t>(dev_ctx, ctx, input, w,
mkldnn_engine)
->CreateFcPrimitive(input, w, bias, output, ctx);
GetPrimitiveFactory<T_in, T_w, uint8_t>(dev_ctx, ctx, input, w,
mkldnn_engine)
->ExecuteFcPrimitive(input, w, bias, output, ctx);
} else {
return GetPrimitiveFactory<T_in, T_w, int8_t>(dev_ctx, ctx, input, w,
mkldnn_engine)
->CreateFcPrimitive(input, w, bias, output, ctx);
GetPrimitiveFactory<T_in, T_w, int8_t>(dev_ctx, ctx, input, w,
mkldnn_engine)
->ExecuteFcPrimitive(input, w, bias, output, ctx);
}
}
@@ -461,10 +473,8 @@ class FCMKLDNNOpKernel : public framework::OpKernel<T_in> {
bool fuse_relu = ctx.Attr<std::string>("activation_type") == "relu";
bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");
auto fc =
GetFcPrimitive<T_in, T_w>(dev_ctx, ctx, input, w, bias, output,
mkldnn_engine, fuse_relu, force_fp32_output);
stream(stream::kind::eager).submit({fc}).wait();
ExecuteFc<T_in, T_w>(dev_ctx, ctx, input, w, bias, output, mkldnn_engine,
fuse_relu, force_fp32_output);
output->set_layout(DataLayout::kMKLDNN);
}
......
@@ -41,7 +41,7 @@ class GaussianMKLDNNKernel : public paddle::framework::OpKernel<T> {
}
tensor->set_layout(DataLayout::kMKLDNN);
tensor->set_format(mkldnn::memory::format::oihw);
tensor->set_format(mkldnn::memory::format_tag::oihw);
}
};
} // namespace operators
......
@@ -49,7 +49,7 @@ class LRNMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
const float k = ctx.Attr<float>("k");
bool is_test = ctx.Attr<bool>("is_test");
auto dims = paddle::framework::vectorize<int>(x->dims());
auto dims = paddle::framework::vectorize<int64_t>(x->dims());
platform::LRNMKLDNNHandler<T> handler(dims, n, alpha, beta, k, x->format(),
is_test, dev_ctx, ctx.GetPlace(),
@@ -58,14 +58,17 @@ class LRNMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
auto src_memory = handler.AcquireSrcMemory(x);
auto dst_memory = handler.AcquireDstMemory(out);
std::shared_ptr<mkldnn::memory> workspace_memory;
std::shared_ptr<mkldnn::lrn_forward> lrn_p;
if (is_test == false) {
workspace_memory = handler.AcquireWorkspaceMemory(mid);
mid->set_layout(framework::DataLayout::kMKLDNN);
auto lrn_p = handler.AcquireForwardPrimitive();
auto workspace_memory = handler.AcquireWorkspaceMemory(mid);
mid->set_layout(framework::DataLayout::kMKLDNN);
mkldnn::stream astream(dev_ctx.GetEngine());
if (!workspace_memory->get_desc().is_zero()) {
mid->set_format(platform::GetMKLDNNFormat(*workspace_memory));
lrn_p = handler.AcquireForwardPrimitive(*src_memory, *workspace_memory,
*dst_memory);
lrn_p->execute(astream, {{MKLDNN_ARG_SRC, *src_memory},
{MKLDNN_ARG_DST, *dst_memory},
{MKLDNN_ARG_WORKSPACE, *workspace_memory}});
} else {
// mid has to be allocated and filled
// k to pass LRN unit tests
@@ -73,11 +76,12 @@ class LRNMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
mid->mutable_data<T>(ctx.GetPlace());
auto e_mid = framework::EigenTensor<T, 4>::From(*mid);
e_mid = e_mid.constant(k);
lrn_p = handler.AcquireForwardPrimitive(*src_memory, *dst_memory);
}
mid->set_format(platform::GetMKLDNNFormat(*dst_memory));
std::vector<mkldnn::primitive> pipeline = {*lrn_p};
mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait();
lrn_p->execute(astream, {{MKLDNN_ARG_SRC, *src_memory},
{MKLDNN_ARG_DST, *dst_memory}});
}
astream.wait();
out->set_layout(framework::DataLayout::kMKLDNN);
out->set_format(platform::GetMKLDNNFormat(*dst_memory));
@@ -109,7 +113,7 @@ class LRNMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
auto dims = paddle::framework::vectorize<int>(x->dims());
auto dims = paddle::framework::vectorize<int64_t>(x->dims());
platform::LRNMKLDNNHandler<T> handler(dims, n, alpha, beta, k, x->format(),
out_grad->format(), dev_ctx,
@@ -120,11 +124,14 @@ class LRNMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
auto diff_dst_memory = handler.AcquireDiffDstMemory(out_grad);
auto diff_src_memory = handler.AcquireDiffSrcMemory(x_grad);
auto lrn_bwd = handler.AcquireBackwardPrimitive(
*src_memory, *diff_dst_memory, *workspace, *diff_src_memory);
auto lrn_bwd = handler.AcquireBackwardPrimitive();
std::vector<mkldnn::primitive> pipeline = {*lrn_bwd};
mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait();
mkldnn::stream astream(dev_ctx.GetEngine());
lrn_bwd->execute(astream, {{MKLDNN_ARG_SRC, *src_memory},
{MKLDNN_ARG_DIFF_DST, *diff_dst_memory},
{MKLDNN_ARG_DIFF_SRC, *diff_src_memory},
{MKLDNN_ARG_WORKSPACE, *workspace}});
astream.wait();
x_grad->set_layout(framework::DataLayout::kMKLDNN);
x_grad->set_format(platform::GetMKLDNNFormat(*diff_src_memory));
......
@@ -59,6 +59,7 @@ class MulPrimitiveFactory {
if (mul_) {
UpdateDataPointers(ctx, output, &x_matrix);
Execute();
return *mul_;
}
@@ -68,9 +69,18 @@ class MulPrimitiveFactory {
auto dst_desc = CreateMemDescriptor<OT>(output, MKLDNNMemoryFormat::any);
mul_ = CreateMulPrimitive(*x_input_, *y_input_, dst_desc, output, ctx);
Execute();
return *mul_;
}
void Execute() {
mkldnn::stream astream(engine_);
(*mul_).execute(astream, {{MKLDNN_ARG_SRC, *x_input_},
{MKLDNN_ARG_WEIGHTS, *y_input_},
{MKLDNN_ARG_DST, *output_}});
astream.wait();
}
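`Execute()` works because a 1.0 primitive no longer captures its memory objects at construction, so the cached `mul_` can be re-run after only the data handles change. A hedged sketch of that re-use pattern (function and parameter names are illustrative, not from the commit):

```cpp
// Re-running a cached MKL-DNN 1.0 primitive on fresh buffers: the memory
// objects keep their descriptors, only the data handles are swapped.
void RerunCachedMul(mkldnn::inner_product_forward& cached_mul,
                    const mkldnn::engine& eng, mkldnn::memory& src,
                    mkldnn::memory& weights, mkldnn::memory& dst,
                    void* new_src_data, void* new_dst_data) {
  src.set_data_handle(new_src_data);
  dst.set_data_handle(new_dst_data);
  mkldnn::stream astream(eng);
  cached_mul.execute(astream, {{MKLDNN_ARG_SRC, src},
                               {MKLDNN_ARG_WEIGHTS, weights},
                               {MKLDNN_ARG_DST, dst}});
  astream.wait();
}
```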
protected:
template <typename T>
Tensor UpdateDataFormat(const Tensor *data, int num_col_dims,
......@@ -92,7 +102,7 @@ class MulPrimitiveFactory {
to_void_cast<T>(x_tmp.data<T>()));
x_tmp.Resize(data->dims());
x_tmp.set_format((MKLDNNMemoryFormat)dst_mdesc.data.format);
x_tmp.set_format(platform::GetMKLDNNFormat(dst_mdesc));
data_matrix = framework::ReshapeToMatrix(x_tmp, num_col_dims);
} else {
data_matrix = framework::ReshapeToMatrix(*data, num_col_dims);
......@@ -106,7 +116,7 @@ class MulPrimitiveFactory {
x_input_->set_data_handle(to_void_cast<XT>(in->data<XT>()));
output_->set_data_handle(out->mutable_data<OT>(ctx.GetPlace()));
if (out->format() == MKLDNNMemoryFormat::format_undef) {
if (out->format() == MKLDNNMemoryFormat::undef) {
auto output_format = platform::GetMKLDNNFormat(*output_);
out->set_format((MKLDNNMemoryFormat)output_format);
}
......@@ -116,48 +126,50 @@ class MulPrimitiveFactory {
memory::desc CreateMemDescriptor(
const Tensor *tensor, MKLDNNMemoryFormat format,
memory::data_type type = platform::MKLDNNGetDataType<T>()) {
auto dims = framework::vectorize<int>(tensor->dims());
auto dims = framework::vectorize<int64_t>(tensor->dims());
return platform::MKLDNNMemDesc(dims, type, format);
}
template <typename T>
memory::desc CreateMemDescriptor(
const std::vector<int> &dims, MKLDNNMemoryFormat format,
const std::vector<int64_t> &dims, MKLDNNMemoryFormat format,
memory::data_type type = platform::MKLDNNGetDataType<T>()) {
return platform::MKLDNNMemDesc(dims, type, format);
}
template <typename T>
memory CreateMemory(const memory::desc &desc, const Tensor *tensor) {
return memory({desc, engine_}, to_void_cast<T>(tensor->data<T>()));
return memory(desc, engine_, to_void_cast<T>(tensor->data<T>()));
}
memory CreateDstMemory(
const inner_product_forward::primitive_desc &mul_prim_desc,
const ExecutionContext &ctx, Tensor *output) {
auto dst_prim_desc = mul_prim_desc.dst_primitive_desc();
auto buffer_size = dst_prim_desc.get_size();
auto dst_desc = mul_prim_desc.dst_desc();
auto buffer_size = dst_desc.get_size();
OT *output_data = output->mutable_data<OT>(ctx.GetPlace(), buffer_size);
memory dst_mem(dst_prim_desc, to_void_cast<OT>(output_data));
output->set_format(platform::GetMKLDNNFormat(dst_mem));
return dst_mem;
output->set_format(paddle::platform::GetMKLDNNFormat(dst_desc));
return memory(dst_desc, engine_, to_void_cast<OT>(output_data));
}
memory Reorder(const memory::desc &src_desc, const memory::desc &dst_desc,
void *src_data, void *dst_data = NULL) {
auto src_mem = memory({src_desc, engine_}, src_data);
auto dst_mem = dst_data ? memory({dst_desc, engine_}, dst_data)
: memory({dst_desc, engine_});
auto src_mem = memory(src_desc, engine_, src_data);
auto dst_mem = dst_data ? memory(dst_desc, engine_, dst_data)
: memory(dst_desc, engine_);
auto reorder = mkldnn::reorder(src_mem, dst_mem);
stream(stream::kind::eager).submit({reorder}).wait();
mkldnn::stream astream(engine_);
reorder.execute(astream, src_mem, dst_mem);
astream.wait();
return dst_mem;
}
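The same constructor change applies to reorders: in 1.0 a reorder is built directly from two memories and executed on an explicit stream. A self-contained sketch (the 2x3 buffers are hypothetical):

```cpp
#include <vector>
#include "mkldnn.hpp"

// 1.0-style reorder: row-major ("ab") to column-major ("ba") copy.
void reorder_v1() {
  mkldnn::engine eng(mkldnn::engine::kind::cpu, 0);
  std::vector<float> in{1, 2, 3, 4, 5, 6}, out(6, 0.f);
  mkldnn::memory src({{2, 3}, mkldnn::memory::data_type::f32,
                      mkldnn::memory::format_tag::ab},
                     eng, in.data());
  mkldnn::memory dst({{2, 3}, mkldnn::memory::data_type::f32,
                      mkldnn::memory::format_tag::ba},
                     eng, out.data());
  auto r = mkldnn::reorder(src, dst);
  mkldnn::stream astream(eng);
  r.execute(astream, src, dst);  // shorthand for the {FROM, TO} arg map
  astream.wait();
}
```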
memory TransposeInputY(const Tensor *input_y) {
auto dims = framework::vectorize<int>(input_y->dims());
auto dims = framework::vectorize<int64_t>(input_y->dims());
std::swap(dims[0], dims[1]); // Correct output dimensions
auto src_desc = CreateMemDescriptor<YT>(dims, MKLDNNMemoryFormat::io);
auto dst_desc = CreateMemDescriptor<YT>(dims, MKLDNNMemoryFormat::oi);
......@@ -169,13 +181,13 @@ class MulPrimitiveFactory {
const memory::desc &dst_desc,
Tensor *output,
const ExecutionContext &ctx) {
const auto y_desc = y_memory.get_primitive_desc().desc();
const auto x_desc = x_memory.get_primitive_desc().desc();
const auto y_desc = y_memory.get_desc();
const auto x_desc = x_memory.get_desc();
auto mul_prim_desc = CreateMulPrimDesc(x_desc, y_desc, dst_desc);
output_ = CreateDstMemory(mul_prim_desc, ctx, output);
return inner_product_forward(mul_prim_desc, x_memory, y_memory, *output_);
return inner_product_forward(mul_prim_desc);
}
inner_product_forward::primitive_desc CreateMulPrimDesc(
......@@ -228,6 +240,7 @@ class QuantMulPrimitiveFactory : public MulPrimitiveFactory<XT, YT, OT> {
if (this->mul_) {
this->UpdateDataPointers(ctx, output, &x_matrix);
this->Execute();
return *(this->mul_);
}
......@@ -243,6 +256,7 @@ class QuantMulPrimitiveFactory : public MulPrimitiveFactory<XT, YT, OT> {
this->mul_ = CreateMulPrimitive(*(this->x_input_), *(this->y_input_),
dst_desc, output, ctx);
this->Execute();
return *(this->mul_);
}
......@@ -253,22 +267,24 @@ class QuantMulPrimitiveFactory : public MulPrimitiveFactory<XT, YT, OT> {
mkldnn::primitive_attr attr;
attr.set_output_scales(mask, scale);
auto src_mem = memory({src_desc, this->engine_}, src_data);
auto dst_mem = memory({dst_desc, this->engine_});
auto src_mem = memory(src_desc, this->engine_, src_data);
auto dst_mem = memory(dst_desc, this->engine_);
auto reorder_pd = mkldnn::reorder::primitive_desc(src_mem, dst_mem, attr);
auto reorder_pd = mkldnn::reorder::primitive_desc(
src_mem.get_primitive_desc(), dst_mem.get_primitive_desc(), attr);
auto reorder = mkldnn::reorder(reorder_pd);
auto reorder = mkldnn::reorder(reorder_pd, src_mem, dst_mem);
stream(stream::kind::eager).submit({reorder}).wait();
mkldnn::stream astream(this->engine_);
reorder.execute(astream, src_mem, dst_mem);
astream.wait();
return dst_mem;
}
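The attr-driven reorder above is also how quantization is expressed in 1.0: output scales are attached to the reorder's `primitive_desc`, which is now built from the memories themselves rather than from `memory::primitive_desc` objects. A hedged standalone sketch (the scale and shapes are made up):

```cpp
#include <cstdint>
#include <vector>
#include "mkldnn.hpp"

// Quantizing reorder sketch: f32 -> s8 with a single output scale.
void quantize_v1() {
  mkldnn::engine eng(mkldnn::engine::kind::cpu, 0);
  std::vector<float> in{0.5f, -0.25f, 1.0f, -1.0f};
  std::vector<int8_t> out(in.size());

  mkldnn::memory src({{4}, mkldnn::memory::data_type::f32,
                      mkldnn::memory::format_tag::x}, eng, in.data());
  mkldnn::memory dst({{4}, mkldnn::memory::data_type::s8,
                      mkldnn::memory::format_tag::x}, eng, out.data());

  mkldnn::primitive_attr attr;
  attr.set_output_scales(0 /* mask 0: one scale for the whole tensor */,
                         {127.f});

  // 1.0 builds the reorder pd from memories (not primitive_descs) + attr.
  auto pd = mkldnn::reorder::primitive_desc(src, dst, attr);
  auto q = mkldnn::reorder(pd);
  mkldnn::stream astream(eng);
  q.execute(astream, src, dst);
  astream.wait();
}
```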
memory QuantInputY(memory input_y, const std::vector<float> &scale_y) {
const auto &dims = input_y.get_primitive_desc().desc().data.dims;
auto ndims = input_y.get_primitive_desc().desc().data.ndims;
auto y_dims = std::vector<int>(dims, dims + ndims);
const auto &dims = input_y.get_desc().data.dims;
auto ndims = input_y.get_desc().data.ndims;
auto y_dims = std::vector<int64_t>(dims, dims + ndims);
auto user_y_desc =
this->template CreateMemDescriptor<YT>(y_dims, MKLDNNMemoryFormat::oi);
......@@ -309,8 +325,8 @@ class QuantMulPrimitiveFactory : public MulPrimitiveFactory<XT, YT, OT> {
const memory::desc &dst_desc,
Tensor *output,
const ExecutionContext &ctx) {
const auto x_desc = x_memory.get_primitive_desc().desc();
const auto y_desc = y_memory.get_primitive_desc().desc();
const auto x_desc = x_memory.get_desc();
const auto y_desc = y_memory.get_desc();
bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");
mkldnn::primitive_attr mul_attr = CreateMulAttr(ctx, force_fp32_output);
......@@ -318,8 +334,7 @@ class QuantMulPrimitiveFactory : public MulPrimitiveFactory<XT, YT, OT> {
this->output_ = this->CreateDstMemory(mul_prim_desc, ctx, output);
return inner_product_forward(mul_prim_desc, x_memory, y_memory,
*(this->output_));
return inner_product_forward(mul_prim_desc);
}
inner_product_forward::primitive_desc CreateMulPrimDesc(
......@@ -340,9 +355,8 @@ std::shared_ptr<MulPrimitiveFactory<XT, YT, OT>> GetPrimitiveFactory(
const Tensor *input_x, const Tensor *input_y,
const mkldnn::engine &mkldnn_engine, bool enable_quant) {
const std::string key = platform::CreateKey(
input_x->type(), framework::vectorize<int>(input_x->dims()),
input_y->type(), framework::vectorize<int>(input_y->dims()),
ctx.OutputName("Out"));
input_x->type(), framework::vectorize(input_x->dims()), input_y->type(),
framework::vectorize(input_y->dims()), ctx.OutputName("Out"));
auto prim_creator = std::static_pointer_cast<MulPrimitiveFactory<XT, YT, OT>>(
dev_ctx.GetBlob(key));
......@@ -399,14 +413,12 @@ class MulMKLDNNKernel : public framework::OpKernel<XT> {
auto mul = GetMulPrimitive<XT, YT>(dev_ctx, ctx, x, y, out, mkldnn_engine);
stream(stream::kind::eager).submit({mul}).wait();
if (out_dims.size() != 2) {
out->Resize(out_dims);
}
out->set_layout(DataLayout::kMKLDNN);
out->set_format(platform::MKLDNNFormatForSize(
out_dims.size(), mkldnn::memory::format::nchw));
out->set_format(platform::MKLDNNFormatForSize(out_dims.size(),
MKLDNNMemoryFormat::nchw));
}
};
......
......@@ -43,13 +43,20 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
PADDLE_ENFORCE_EQ(input->layout(), DataLayout::kMKLDNN,
"Wrong layout set for Input tensor");
PADDLE_ENFORCE_NE(input->format(), MKLDNNMemoryFormat::format_undef,
PADDLE_ENFORCE_NE(input->format(), MKLDNNMemoryFormat::undef,
"Wrong format set for Input tensor");
std::string pooling_type = ctx.Attr<std::string>("pooling_type");
std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::vector<int> ksize_temp = ctx.Attr<std::vector<int>>("ksize");
std::vector<int64_t> ksize(begin(ksize_temp), end(ksize_temp));
std::vector<int> strides_temp = ctx.Attr<std::vector<int>>("strides");
std::vector<int64_t> strides(begin(strides_temp), end(strides_temp));
std::vector<int> paddings_temp = ctx.Attr<std::vector<int>>("paddings");
std::vector<int64_t> paddings(begin(paddings_temp), end(paddings_temp));
bool global_pooling = ctx.Attr<bool>("global_pooling");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
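The `*_temp` copies above exist because MKL-DNN 1.0 switched `mkldnn::memory::dims` to 64-bit elements while the op attributes remain `std::vector<int>`. The widening copy at the kernel boundary, sketched in isolation:

```cpp
// Widening copy from int attributes to MKL-DNN 1.0's 64-bit dims.
std::vector<int> ksize_attr{2, 2};  // op attribute stays 32-bit
std::vector<int64_t> ksize(ksize_attr.begin(), ksize_attr.end());
// mkldnn::memory::dims is std::vector<int64_t> in 1.0, so `ksize` can now
// be passed anywhere a dims argument is expected.
```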
......@@ -71,8 +78,8 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
UpdatePadding(&paddings, global_pooling, 0, padding_algorithm, data_dims,
strides, ksize);
auto src_tz = paddle::framework::vectorize<int>(input->dims());
auto dst_tz = paddle::framework::vectorize<int>(output->dims());
auto src_tz = paddle::framework::vectorize<int64_t>(input->dims());
auto dst_tz = paddle::framework::vectorize<int64_t>(output->dims());
auto is_test = ctx.Attr<bool>("is_test");
......@@ -85,22 +92,21 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
auto src_memory = handler.AcquireSrcMemory(input);
auto dst_memory = handler.AcquireDstMemory(output);
std::shared_ptr<mkldnn::pooling_forward> pool_p;
std::shared_ptr<mkldnn::memory> workspace_memory;
auto pool_p = handler.AcquireForwardPrimitive();
mkldnn::stream astream(dev_ctx.GetEngine());
if ((is_test == false) && (pooling_type == "max")) {
// Training
workspace_memory = handler.AcquireWorkspaceMemory();
pool_p = handler.AcquireForwardPrimitive(*src_memory, *dst_memory,
*workspace_memory);
auto workspace_memory = handler.AcquireWorkspaceMemory();
pool_p->execute(astream, {{MKLDNN_ARG_SRC, *src_memory},
{MKLDNN_ARG_DST, *dst_memory},
{MKLDNN_ARG_WORKSPACE, *workspace_memory}});
} else {
// Inference
pool_p = handler.AcquireForwardPrimitive(*src_memory, *dst_memory);
pool_p->execute(astream, {{MKLDNN_ARG_SRC, *src_memory},
{MKLDNN_ARG_DST, *dst_memory}});
}
// push primitive to stream and wait until it's executed
std::vector<mkldnn::primitive> pipeline{*pool_p};
stream(stream::kind::eager).submit(pipeline).wait();
astream.wait();
output->set_layout(DataLayout::kMKLDNN);
output->set_format(platform::GetMKLDNNFormat(*dst_memory));
......@@ -120,12 +126,12 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
PADDLE_ENFORCE_EQ(in_x->layout(), DataLayout::kMKLDNN,
"Wrong layout set for Input tensor");
PADDLE_ENFORCE_NE(in_x->format(), MKLDNNMemoryFormat::format_undef,
PADDLE_ENFORCE_NE(in_x->format(), MKLDNNMemoryFormat::undef,
"Wrong format set for Input tensor");
PADDLE_ENFORCE_EQ(out_grad->layout(), DataLayout::kMKLDNN,
"Wrong layout set for Input output_grad tensor");
PADDLE_ENFORCE_NE(out_grad->format(), MKLDNNMemoryFormat::format_undef,
PADDLE_ENFORCE_NE(out_grad->format(), MKLDNNMemoryFormat::undef,
"Wrong format set for Input output_grad tensor");
PADDLE_ENFORCE_EQ(
......@@ -133,9 +139,16 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
"is_test attribute should be set to False in training phase.");
std::string pooling_type = ctx.Attr<std::string>("pooling_type");
std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::vector<int> ksize_temp = ctx.Attr<std::vector<int>>("ksize");
std::vector<int64_t> ksize(begin(ksize_temp), end(ksize_temp));
std::vector<int> strides_temp = ctx.Attr<std::vector<int>>("strides");
std::vector<int64_t> strides(begin(strides_temp), end(strides_temp));
std::vector<int> paddings_temp = ctx.Attr<std::vector<int>>("paddings");
std::vector<int64_t> paddings(begin(paddings_temp), end(paddings_temp));
bool global_pooling = ctx.Attr<bool>("global_pooling");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
......@@ -155,8 +168,8 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
std::vector<mkldnn::primitive> pipeline;
auto diff_src_tz = paddle::framework::vectorize<int>(in_x_grad->dims());
auto diff_dst_tz = paddle::framework::vectorize<int>(out_grad->dims());
auto diff_src_tz = paddle::framework::vectorize<int64_t>(in_x_grad->dims());
auto diff_dst_tz = paddle::framework::vectorize<int64_t>(out_grad->dims());
// Get a unique name from "argument" name of "Out" variable
// This name will be used as key when referring info from device context
......@@ -173,22 +186,21 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
auto diff_dst_memory = handler.AcquireDiffDstMemory(out_grad);
auto diff_src_memory = handler.AcquireDiffSrcMemory(in_x_grad);
std::shared_ptr<mkldnn::pooling_backward> pool_bwd_p;
std::shared_ptr<mkldnn::memory> workspace_memory;
auto pool_bwd_p = handler.AcquireBackwardPrimitive();
mkldnn::stream astream(dev_ctx.GetEngine());
if (pooling_type == "max") {
// Max - pooling needs Workspace
workspace_memory = handler.AcquireWorkspaceMemory();
pool_bwd_p = handler.AcquireBackwardPrimitive(
*diff_dst_memory, *workspace_memory, *diff_src_memory);
auto workspace_memory = handler.AcquireWorkspaceMemory();
pool_bwd_p->execute(astream, {{MKLDNN_ARG_DIFF_SRC, *diff_src_memory},
{MKLDNN_ARG_DIFF_DST, *diff_dst_memory},
{MKLDNN_ARG_WORKSPACE, *workspace_memory}});
} else {
// Average Pooling
pool_bwd_p =
handler.AcquireBackwardPrimitive(*diff_dst_memory, *diff_src_memory);
pool_bwd_p->execute(astream, {{MKLDNN_ARG_DIFF_SRC, *diff_src_memory},
{MKLDNN_ARG_DIFF_DST, *diff_dst_memory}});
}
pipeline.push_back(*pool_bwd_p);
mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait();
astream.wait();
in_x_grad->set_layout(DataLayout::kMKLDNN);
in_x_grad->set_format(platform::GetMKLDNNFormat(*diff_src_memory));
......
......@@ -42,8 +42,8 @@ class QuantOpKernel : public framework::OpKernel<T> {
const auto& engine = dev_ctx.GetEngine();
std::vector<primitive> pipeline;
auto src_tz = paddle::framework::vectorize<int>(input->dims());
auto dst_tz = paddle::framework::vectorize<int>(output->dims());
auto src_tz = paddle::framework::vectorize<int64_t>(input->dims());
auto dst_tz = paddle::framework::vectorize<int64_t>(output->dims());
const T* input_data = input->data<T>();
......@@ -66,24 +66,20 @@ class QuantOpKernel : public framework::OpKernel<T> {
auto src_md = platform::MKLDNNMemDesc({src_tz}, memory::data_type::f32,
input->format());
auto src_pd = mkldnn::memory::primitive_desc(src_md, engine);
src_memory =
std::make_shared<mkldnn::memory>(src_pd, to_void_cast<T>(input_data));
std::shared_ptr<primitive::at> src_memory_p =
std::shared_ptr<primitive::at>(new primitive::at(*src_memory));
src_memory = std::make_shared<mkldnn::memory>(
src_md, engine, to_void_cast<T>(input_data));
std::shared_ptr<mkldnn::memory::primitive_desc> dst_pd;
std::shared_ptr<mkldnn::memory::desc> dst_md;
if (is_negative) {
platform::SetDstMemoryQuantized<int8_t>(ctx, output, dst_tz, engine,
dst_pd, dst_memory);
dst_md, dst_memory);
} else {
platform::SetDstMemoryQuantized<uint8_t>(ctx, output, dst_tz, engine,
dst_pd, dst_memory);
dst_md, dst_memory);
}
auto reorder_pd = std::shared_ptr<reorder::primitive_desc>(
new reorder::primitive_desc(src_pd, *dst_pd, attri));
reorder_p = std::shared_ptr<reorder>(
new reorder(*reorder_pd, *src_memory_p, *dst_memory));
new reorder::primitive_desc(*src_memory, *dst_memory, attri));
reorder_p = std::shared_ptr<reorder>(new reorder(*reorder_pd));
dev_ctx.SetBlob(key_prim, reorder_p);
dev_ctx.SetBlob(key_src_mem, src_memory);
......@@ -103,8 +99,10 @@ class QuantOpKernel : public framework::OpKernel<T> {
}
}
pipeline.push_back(*reorder_p);
stream(stream::kind::eager).submit(pipeline).wait();
mkldnn::stream astream(engine);
reorder_p->execute(astream, *src_memory, *dst_memory);
astream.wait();
output->set_layout(DataLayout::kMKLDNN);
output->set_format(GetMKLDNNFormat(*dst_memory));
}
......
......@@ -43,8 +43,8 @@ class ReQuantOpKernel : public framework::OpKernel<T> {
const auto& engine = dev_ctx.GetEngine();
std::vector<primitive> pipeline;
auto src_tz = paddle::framework::vectorize<int>(input->dims());
auto dst_tz = paddle::framework::vectorize<int>(output->dims());
auto src_tz = paddle::framework::vectorize<int64_t>(input->dims());
auto dst_tz = paddle::framework::vectorize<int64_t>(output->dims());
mkldnn::memory::data_type src_dt =
paddle::framework::ToMKLDNNDataType(input->type());
mkldnn::memory::data_type dst_dt = src_dt;
......@@ -60,23 +60,21 @@ class ReQuantOpKernel : public framework::OpKernel<T> {
attri.set_output_scales(mask, {scale_shift});
auto src_md = platform::MKLDNNMemDesc({src_tz}, src_dt, src_fmt);
auto src_pd = mkldnn::memory::primitive_desc(src_md, engine);
auto src_memory =
std::make_shared<mkldnn::memory>(src_pd, to_void_cast<T>(input_data));
std::shared_ptr<primitive::at> src_memory_p =
std::shared_ptr<primitive::at>(new primitive::at(*src_memory));
auto src_memory = std::make_shared<mkldnn::memory>(
src_md, engine, to_void_cast<T>(input_data));
auto dst_md = platform::MKLDNNMemDesc({dst_tz}, dst_dt, dst_fmt);
auto dst_pd = mkldnn::memory::primitive_desc(dst_md, engine);
auto dst_memory = mkldnn::memory(dst_pd, to_void_cast<T>(output_data));
auto dst_memory =
mkldnn::memory(dst_md, engine, to_void_cast<T>(output_data));
auto reorder_pd = std::shared_ptr<reorder::primitive_desc>(
new reorder::primitive_desc(src_pd, dst_pd, attri));
new reorder::primitive_desc(*src_memory, dst_memory, attri));
auto reorder_p = std::shared_ptr<reorder>(
new reorder(*reorder_pd, *src_memory_p, dst_memory));
pipeline.push_back(*reorder_p);
stream(stream::kind::eager).submit(pipeline).wait();
auto reorder_p = std::shared_ptr<reorder>(new reorder(*reorder_pd));
mkldnn::stream astream(engine);
reorder_p->execute(astream, *src_memory, dst_memory);
astream.wait();
output->set_layout(DataLayout::kMKLDNN);
output->set_format(GetMKLDNNFormat(dst_memory));
......
......@@ -38,7 +38,7 @@ class SoftmaxMKLDNNHandler
: public platform::MKLDNNHandlerT<T, mkldnn::softmax_forward,
mkldnn::softmax_backward> {
public:
SoftmaxMKLDNNHandler(const std::vector<int>& dims,
SoftmaxMKLDNNHandler(const std::vector<int64_t>& dims,
const MKLDNNMemoryFormat fmt, const int& axis,
const platform::MKLDNNDeviceContext& dev_ctx,
platform::Place cpu_place, const std::string& uniq_name)
......@@ -52,7 +52,7 @@ class SoftmaxMKLDNNHandler
axis);
}
SoftmaxMKLDNNHandler(const std::vector<int>& dims,
SoftmaxMKLDNNHandler(const std::vector<int64_t>& dims,
const MKLDNNMemoryFormat fmt,
const MKLDNNMemoryFormat diff_fmt, const int& axis,
const platform::MKLDNNDeviceContext& dev_ctx,
......@@ -87,25 +87,24 @@ class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel<T> {
auto dims = input->dims(); // input and output share the same shape
const int axis = CanonicalAxis(ctx.Attr<int>("axis"), dims.size());
auto softmax_tz = paddle::framework::vectorize<int>(dims);
auto softmax_tz = paddle::framework::vectorize<int64_t>(dims);
SoftmaxMKLDNNHandler<T> handler(softmax_tz, input->format(), axis, dev_ctx,
ctx.GetPlace(), ctx.OutputName("Out"));
auto softmax_src_memory_p = handler.AcquireSrcMemory(input);
auto softmax_dst_memory_p = handler.AcquireDstMemory(output);
auto softmax_p = handler.AcquireForwardPrimitive(*softmax_src_memory_p,
*softmax_dst_memory_p);
auto softmax_p = handler.AcquireForwardPrimitive();
std::vector<primitive> pipeline{*softmax_p};
stream(stream::kind::eager).submit(pipeline).wait();
mkldnn::stream astream(dev_ctx.GetEngine());
softmax_p->execute(astream, {{MKLDNN_ARG_SRC, *softmax_src_memory_p},
{MKLDNN_ARG_DST, *softmax_dst_memory_p}});
astream.wait();
const bool is_test = ctx.Attr<bool>("is_test");
if (!is_test) {
T* output_data = output->mutable_data<T>(ctx.GetPlace());
int size = std::accumulate(begin(softmax_tz), end(softmax_tz), 1,
std::multiplies<int>());
std::for_each(output_data, &output_data[size], [](T& val) {
std::for_each(output_data, &output_data[output->numel()], [](T& val) {
val = std::max(val, static_cast<T>(exp(-64)));
});
}
......@@ -136,7 +135,7 @@ class SoftmaxMKLDNNGradKernel : public paddle::framework::OpKernel<T> {
auto dims = dout->dims(); // input and output share the same shape
const int axis = CanonicalAxis(ctx.Attr<int>("axis"), dims.size());
std::vector<int> softmax_tz = paddle::framework::vectorize<int>(dims);
auto softmax_tz = paddle::framework::vectorize<int64_t>(dims);
SoftmaxMKLDNNHandler<T> handler(softmax_tz, output->format(),
dout->format(), axis, dev_ctx,
......@@ -146,11 +145,14 @@ class SoftmaxMKLDNNGradKernel : public paddle::framework::OpKernel<T> {
auto diff_dst_memory_p = handler.AcquireDiffDstMemory(dout);
auto diff_src_memory_p = handler.AcquireDiffSrcMemory(dx);
auto softmax_bwd_p = handler.AcquireBackwardPrimitive(
*dst_memory_p, *diff_dst_memory_p, *diff_src_memory_p);
auto softmax_bwd_p = handler.AcquireBackwardPrimitive();
std::vector<primitive> pipeline{*softmax_bwd_p};
stream(stream::kind::eager).submit(pipeline).wait();
mkldnn::stream astream(dev_ctx.GetEngine());
softmax_bwd_p->execute(astream,
{{MKLDNN_ARG_DST, *dst_memory_p},
{MKLDNN_ARG_DIFF_DST, *diff_dst_memory_p},
{MKLDNN_ARG_DIFF_SRC, *diff_src_memory_p}});
astream.wait();
dx->set_layout(framework::DataLayout::kMKLDNN);
dx->set_format(dout->format());
......
......@@ -63,11 +63,11 @@ class SumMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
LoDTensor* output = ctx.Output<LoDTensor>("Out");
T* output_data = output->mutable_data<T>(ctx.GetPlace());
auto dst_tz = framework::vectorize<int>(output->dims());
auto dst_tz = framework::vectorize<int64_t>(output->dims());
auto src_tz = dst_tz;
MKLDNNMemoryFormat output_format{MKLDNNMemoryFormat::format_undef};
MKLDNNMemoryFormat output_format{MKLDNNMemoryFormat::undef};
std::vector<float> scales;
std::vector<memory::primitive_desc> srcs_mpd;
std::vector<memory::desc> srcs_md;
std::vector<mkldnn::memory> srcs_mem;
PADDLE_ENFORCE_EQ(in_vars[0]->IsType<LoDTensor>(), true,
......@@ -75,7 +75,7 @@ class SumMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
auto& input0 = in_vars[0]->Get<LoDTensor>();
PADDLE_ENFORCE_EQ(input0.layout(), DataLayout::kMKLDNN,
"Wrong layout set for inputs[0] tensor");
PADDLE_ENFORCE_NE(input0.format(), MKLDNNMemoryFormat::format_undef,
PADDLE_ENFORCE_NE(input0.format(), MKLDNNMemoryFormat::undef,
"Wrong format set for inputs[0] tensor");
MKLDNNMemoryFormat input_format = input0.format();
......@@ -86,7 +86,7 @@ class SumMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
auto& input = in_vars[i]->Get<LoDTensor>();
PADDLE_ENFORCE_EQ(input.layout(), DataLayout::kMKLDNN,
"Wrong layout set for inputs");
PADDLE_ENFORCE_NE(input.format(), MKLDNNMemoryFormat::format_undef,
PADDLE_ENFORCE_NE(input.format(), MKLDNNMemoryFormat::undef,
"Wrong format set for inputs");
if (input.numel() == 0) {
......@@ -97,9 +97,8 @@ class SumMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
auto src_md =
memory::desc(src_tz, memory::data_type::f32, input_format);
auto src_mpd = memory::primitive_desc(src_md, mkldnn_engine);
auto src_mem = memory(src_mpd, to_void_cast(input_data));
srcs_mpd.push_back(src_mpd);
auto src_mem = memory(src_md, mkldnn_engine, to_void_cast(input_data));
srcs_md.push_back(src_md);
srcs_mem.push_back(src_mem);
scales.push_back(1.0);
}
......@@ -107,36 +106,43 @@ class SumMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
auto dst_md =
memory::desc(dst_tz, memory::data_type::f32, MKLDNNMemoryFormat::any);
auto sum_pd = sum::primitive_desc(dst_md, scales, srcs_mpd);
auto sum_pd = sum::primitive_desc(dst_md, scales, srcs_md, mkldnn_engine);
std::shared_ptr<memory> dst_mem;
if (in_place) {
dst_mem.reset(new memory(sum_pd.dst_primitive_desc()));
dst_mem.reset(new memory(sum_pd.dst_desc(), mkldnn_engine));
} else {
dst_mem.reset(new memory(sum_pd.dst_primitive_desc(), output_data));
}
std::vector<mkldnn::primitive::at> inputs;
for (size_t i = 0; i < srcs_mem.size(); ++i) {
inputs.push_back(srcs_mem[i]);
dst_mem.reset(
new memory(sum_pd.dst_desc(), mkldnn_engine, output_data));
}
auto sum_prim = mkldnn::sum(sum_pd, inputs, *dst_mem);
output_format = (MKLDNNMemoryFormat)platform::GetMKLDNNFormat(sum_pd);
auto sum_prim = mkldnn::sum(sum_pd);
output_format = platform::GetMKLDNNFormat(sum_pd.dst_desc());
primitive reorder_prim;
std::shared_ptr<mkldnn::reorder> reorder_p;
std::shared_ptr<memory> target_mem;
if (in_place) {
output_format = input_format;
target_mem.reset(new memory(
{{{src_tz}, memory::data_type::f32, output_format}, mkldnn_engine},
output_data));
reorder_prim = reorder(*dst_mem, *target_mem);
target_mem.reset(
new memory({{src_tz}, memory::data_type::f32, output_format},
mkldnn_engine, output_data));
reorder_p = std::make_shared<reorder>(*dst_mem, *target_mem);
}
mkldnn::stream astream(mkldnn_engine);
std::unordered_map<int, memory> args;
for (size_t i = 0; i < srcs_mem.size(); ++i) {
args.insert({MKLDNN_ARG_MULTIPLE_SRC + i, srcs_mem.at(i)});
}
args.insert({MKLDNN_ARG_DST, *dst_mem});
sum_prim.execute(astream, args);
astream.wait();
std::vector<primitive> pipeline;
pipeline.push_back(sum_prim);
if (in_place) pipeline.push_back(reorder_prim);
stream(stream::kind::eager).submit(pipeline).wait();
if (in_place) {
reorder_p->execute(astream, *dst_mem, *target_mem);
astream.wait();
}
output->set_layout(DataLayout::kMKLDNN);
output->set_format(output_format);
......
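`sum` is the one primitive here with a variable input count; in 1.0 the old `std::vector<primitive::at>` is replaced by indexed argument keys, as the hunk above shows. A standalone sketch of the pattern with two hypothetical inputs:

```cpp
#include <unordered_map>
#include <vector>
#include "mkldnn.hpp"

// 1.0 sum: each input is passed under MKLDNN_ARG_MULTIPLE_SRC + i.
void sum_v1() {
  mkldnn::engine eng(mkldnn::engine::kind::cpu, 0);
  mkldnn::memory::desc md({4}, mkldnn::memory::data_type::f32,
                          mkldnn::memory::format_tag::x);
  std::vector<float> a{1, 2, 3, 4}, b{10, 20, 30, 40}, c(4, 0.f);
  std::vector<mkldnn::memory> srcs{mkldnn::memory(md, eng, a.data()),
                                   mkldnn::memory(md, eng, b.data())};
  mkldnn::memory dst(md, eng, c.data());

  std::vector<float> scales{1.f, 1.f};
  auto pd = mkldnn::sum::primitive_desc(md, scales, {md, md}, eng);
  auto s = mkldnn::sum(pd);

  std::unordered_map<int, mkldnn::memory> args{{MKLDNN_ARG_DST, dst}};
  for (int i = 0; i < static_cast<int>(srcs.size()); ++i)
    args.insert({MKLDNN_ARG_MULTIPLE_SRC + i, srcs[i]});

  mkldnn::stream astream(eng);
  s.execute(astream, args);
  astream.wait();  // c is now {11, 22, 33, 44}
}
```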
......@@ -44,7 +44,7 @@ class TransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
return;
}
auto nchw_tz = paddle::framework::vectorize<int>(input->dims());
auto nchw_tz = paddle::framework::vectorize<int64_t>(input->dims());
const std::string key = platform::CreateKey(nchw_tz, ctx.OutputName("Out"));
......@@ -58,12 +58,13 @@ class TransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
auto transpose_p = handler.AcquireTranspose(transpose_dst_memory_p,
transpose_src_memory_p);
std::vector<mkldnn::primitive> pipeline;
pipeline.push_back(*transpose_p);
mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait();
mkldnn::stream astream(mkldnn_engine);
transpose_p->execute(astream, *transpose_src_memory_p,
*transpose_dst_memory_p);
astream.wait();
output->set_layout(DataLayout::kNCHW);
output->set_format(MKLDNNMemoryFormat::format_undef);
output->set_format(MKLDNNMemoryFormat::undef);
}
};
......@@ -95,7 +96,7 @@ class TransposeMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
const T* out_grad_data = out_grad->data<T>();
x_grad->mutable_data<T>(ctx.GetPlace());
auto nchw_tz = paddle::framework::vectorize<int>(out_grad->dims());
auto nchw_tz = paddle::framework::vectorize<int64_t>(out_grad->dims());
const std::string key = platform::CreateKey(
nchw_tz, ctx.OutputName(framework::GradVarName("X")));
......@@ -110,9 +111,10 @@ class TransposeMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
auto transpose_p = handler.AcquireTranspose(transpose_dst_memory_p,
transpose_src_memory_p);
std::vector<mkldnn::primitive> pipeline;
pipeline.push_back(*transpose_p);
mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait();
mkldnn::stream astream(mkldnn_engine);
transpose_p->execute(astream, *transpose_src_memory_p,
*transpose_dst_memory_p);
astream.wait();
}
};
......
......@@ -376,7 +376,9 @@ Place CUDAPinnedDeviceContext::GetPlace() const { return place_; }
#ifdef PADDLE_WITH_MKLDNN
MKLDNNDeviceContext::MKLDNNDeviceContext(CPUPlace place)
: CPUDeviceContext(place), engine_(mkldnn::engine::cpu, 0), p_blobmap_() {
: CPUDeviceContext(place),
engine_(mkldnn::engine::kind::cpu, 0),
p_blobmap_() {
p_blobmap_.reset(new BlobMap());
p_mutex_.reset(new std::mutex());
}
......
......@@ -23,7 +23,7 @@ limitations under the License. */
#include "paddle/fluid/platform/place.h"
namespace paddle {
#ifdef PADDLE_WITH_MKLDNN
using MKLDNNMemoryFormat = mkldnn::memory::format;
using MKLDNNMemoryFormat = mkldnn::memory::format_tag;
#endif
namespace platform {
......@@ -71,11 +71,10 @@ tf_pd<Type> MKLDNNBwdPrimitiveDesc(const Engine& e, const Primitive& p,
return tf_pd<Type>(desc, e, p);
}
inline mkldnn::memory::desc MKLDNNMemDesc(const std::vector<int>& dims,
inline mkldnn::memory::desc MKLDNNMemDesc(const std::vector<int64_t>& dims,
mkldnn::memory::data_type data_type,
MKLDNNMemoryFormat format) {
mkldnn::memory::dims tz = dims;
return mkldnn::memory::desc({tz}, data_type, format);
return mkldnn::memory::desc({dims}, data_type, format);
}
inline bool CanMKLDNNBeUsed(const framework::ExecutionContext& ctx) {
......@@ -85,7 +84,7 @@ inline bool CanMKLDNNBeUsed(const framework::ExecutionContext& ctx) {
template <typename Type>
mkldnn::memory::data_type MKLDNNGetDataType() {
return mkldnn::memory::data_type::data_undef;
return mkldnn::memory::data_type::undef;
}
template <>
......@@ -105,22 +104,136 @@ inline mkldnn::memory::data_type MKLDNNGetDataType<uint8_t>() {
return mkldnn::memory::data_type::u8;
}
inline void Reorder(const mkldnn::memory& src, const mkldnn::memory& dst) {
inline void Reorder(mkldnn::memory src, mkldnn::memory dst,
const mkldnn::engine& engine) {
auto reorder_prim = mkldnn::reorder(src, dst);
std::vector<mkldnn::primitive> pipeline;
pipeline.push_back(reorder_prim);
mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait();
mkldnn::stream astream(engine);
reorder_prim.execute(astream, src, dst);
astream.wait();
}
inline MKLDNNMemoryFormat GetMKLDNNFormat(const mkldnn::memory memory) {
return static_cast<MKLDNNMemoryFormat>(
memory.get_primitive_desc().desc().data.format);
inline mkldnn::memory::format_tag GetMKLDNNFormat(
mkldnn::memory::desc mem_desc) {
auto ndims = mem_desc.data.ndims;
auto strides = mem_desc.data.format_desc.blocking.strides;
auto inner_nblks = mem_desc.data.format_desc.blocking.inner_nblks;
auto inner_blks = mem_desc.data.format_desc.blocking.inner_blks;
auto inner_idxs = mem_desc.data.format_desc.blocking.inner_idxs;
if (ndims == 1) {
return mkldnn::memory::format_tag::x;
} else if (ndims == 2) {
if (inner_nblks == 0) {
if (strides[0] >= strides[1]) {
return mkldnn::memory::format_tag::nc;
} else {
return mkldnn::memory::format_tag::cn;
}
}
} else if (ndims == 3) {
if (inner_nblks == 0) {
if (strides[0] >= strides[1] && strides[1] >= strides[2]) {
return mkldnn::memory::format_tag::ncw;
} else {
return mkldnn::memory::format_tag::nwc;
}
}
} else if (ndims == 4) {
if (inner_nblks == 0) {
if (strides[0] >= strides[1] && strides[1] >= strides[2] &&
strides[2] >= strides[3]) {
return mkldnn::memory::format_tag::nchw;
} else {
return mkldnn::memory::format_tag::nhwc;
}
} else if (inner_nblks == 1) {
if (inner_blks[0] == 16 && inner_idxs[0] == 1) {
return mkldnn::memory::format_tag::nChw16c;
} else if (inner_blks[0] == 8 && inner_idxs[0] == 1) {
return mkldnn::memory::format_tag::nChw8c;
} else if (inner_blks[0] == 8 && inner_idxs[0] == 0) {
if (strides[0] >= strides[2] && strides[2] >= strides[3] &&
strides[3] >= strides[1]) {
return mkldnn::memory::format_tag::Acdb8a;
}
} else if (inner_blks[0] == 4 && inner_idxs[0] == 1) {
return mkldnn::memory::format_tag::nChw4c;
} else if (inner_blks[0] == 16 && inner_idxs[0] == 0) {
if (strides[0] >= strides[2] && strides[2] >= strides[3] &&
strides[3] >= strides[1]) {
return mkldnn::memory::format_tag::Acdb16a;
}
}
} else if (inner_nblks == 2) {
if (inner_blks[0] == 16 && inner_blks[1] == 16) {
if (inner_idxs[0] == 1 && inner_idxs[1] == 0) {
return mkldnn::memory::format_tag::OIhw16i16o;
}
} else if (inner_blks[0] == 8 && inner_blks[1] == 8) {
if (inner_idxs[0] == 1 && inner_idxs[1] == 0) {
return mkldnn::memory::format_tag::OIhw8i8o;
}
}
}
} else if (ndims == 5) {
if (inner_nblks == 0) {
if (strides[0] >= strides[1] && strides[1] >= strides[2] &&
strides[2] >= strides[3] && strides[3] >= strides[4]) {
return mkldnn::memory::format_tag::ncdhw;
} else {
return mkldnn::memory::format_tag::ndhwc;
}
} else if (inner_nblks == 1) {
if (inner_blks[0] == 8 && inner_idxs[0] == 0) {
if (strides[0] >= strides[2] && strides[2] >= strides[3] &&
strides[3] >= strides[4] && strides[4] >= strides[1]) {
return mkldnn::memory::format_tag::Acdeb8a;
}
} else if (inner_blks[0] == 8 && inner_idxs[0] == 1) {
if (strides[0] >= strides[1] && strides[1] >= strides[2] &&
strides[2] >= strides[3] && strides[3] >= strides[4]) {
return mkldnn::memory::format_tag::aBcde8b;
}
} else if (inner_blks[0] == 16 && inner_idxs[0] == 0) {
if (strides[0] >= strides[2] && strides[2] >= strides[3] &&
strides[3] >= strides[4] && strides[4] >= strides[1]) {
return mkldnn::memory::format_tag::Acdeb16a;
}
} else if (inner_blks[0] == 16 && inner_idxs[0] == 1) {
if (strides[0] >= strides[1] && strides[1] >= strides[2] &&
strides[2] >= strides[3] && strides[3] >= strides[4]) {
return mkldnn::memory::format_tag::aBcde16b;
}
}
}
} else if (ndims == 6) {
if (inner_nblks == 0) {
if (strides[0] >= strides[1] && strides[1] >= strides[2] &&
strides[2] >= strides[3] && strides[3] >= strides[4] &&
strides[4] >= strides[5]) {
return mkldnn::memory::format_tag::abcdef;
}
}
}
// DEBUG CODE - KEEP UNTIL TENSOR.MEMORY_DESC IS IMPLEMENTED
// std::cout<<"@@@@@@@@@@ UNDEFINED FORMAT @@@@@@@@@@@@@@@@@@@"<<std::endl;
// std::cout<<"NDIMS: "<<ndims<<std::endl;
// std::cout<<"INNER_NBLKS: "<<inner_nblks<<std::endl;
// for (int i=0;i<ndims;++i) {
// std::cout<<"STRIDE["<<i<<"]: "<<strides[i]<<std::endl;
// }
// for (int i=0;i<inner_nblks;++i) {
// std::cout<<"INNER_BLKS["<<i<<"]: "<<inner_blks[i]<<std::endl;
// }
// for (int i=0;i<inner_nblks;++i) {
// std::cout<<"INNER_IDXS["<<i<<"]: "<<inner_idxs[i]<<std::endl;
// }
return mkldnn::memory::format_tag::undef;
}
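MKL-DNN 1.0 stores layouts as strides plus inner blocks instead of a format enum, so this helper reverse-maps them to a `format_tag`. A hypothetical round-trip check of the heuristic (assuming the helper lives in `paddle::platform`, as elsewhere in this header):

```cpp
#include <cassert>
#include "mkldnn.hpp"
// GetMKLDNNFormat is the helper defined above.

void format_roundtrip_check() {
  // A desc built from a blocked tag should map back to that tag: nChw16c
  // becomes plain strides plus one inner block of 16 on the channel axis.
  mkldnn::memory::desc md({8, 32, 14, 14}, mkldnn::memory::data_type::f32,
                          mkldnn::memory::format_tag::nChw16c);
  assert(paddle::platform::GetMKLDNNFormat(md) ==
         mkldnn::memory::format_tag::nChw16c);
}
```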
inline MKLDNNMemoryFormat GetMKLDNNFormat(
const mkldnn::sum::primitive_desc& memory) {
return static_cast<MKLDNNMemoryFormat>(
memory.dst_primitive_desc().desc().data.format);
inline mkldnn::memory::format_tag GetMKLDNNFormat(const mkldnn::memory memory) {
auto mem_desc = memory.get_desc();
return GetMKLDNNFormat(mem_desc);
}
inline MKLDNNMemoryFormat MKLDNNFormatForSize(size_t dims_size,
......@@ -190,13 +303,37 @@ inline void AppendKey(std::string* key, const T& num) {
key->append(std::to_string(num));
}
template <>
inline void AppendKey(std::string* key,
const mkldnn::memory::format_tag& format) {
key->append(std::to_string(static_cast<int>(format)));
}
template <>
inline void AppendKey(std::string* key,
const mkldnn::memory::data_type& data_type) {
key->append(std::to_string(static_cast<int>(data_type)));
}
template <>
inline void AppendKey(std::string* key, const mkldnn::algorithm& algorithm) {
key->append(std::to_string(static_cast<int>(algorithm)));
}
template <>
inline void AppendKey(std::string* key,
const mkldnn::normalization_flags& flags) {
key->append(std::to_string(static_cast<int>(flags)));
}
inline void AppendKey(std::string* key, const std::string& str) {
key->append(str);
}
inline void AppendKey(std::string* key, const char* str) { key->append(str); }
inline void AppendKey(std::string* key, const std::vector<int>& dims) {
template <typename T>
inline void AppendKey(std::string* key, const std::vector<T>& dims) {
for (size_t i = 0; i < dims.size(); i++) {
AppendKey(key, std::to_string(dims[i]));
}
......@@ -211,8 +348,8 @@ inline std::string CreateKey(ArgTypes&&... args) {
return key;
}
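With the new specializations, `CreateKey` can mix dims, data types, format tags, and names in one call. An illustrative use (the digits in the comment depend on the enum values, so treat them as approximate):

```cpp
// Building a blob-cache key from heterogeneous arguments.
std::vector<int64_t> dims{8, 32, 14, 14};
std::string key = platform::CreateKey(dims, mkldnn::memory::data_type::f32,
                                      mkldnn::memory::format_tag::nChw16c,
                                      ctx.OutputName("Out"));
// -> "8321414" + <int value of f32> + <int value of nChw16c> + output name
```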
inline std::vector<std::vector<int>> ToMkldnnPadding(
const std::vector<int>& paddings) {
inline std::vector<std::vector<int64_t>> ToMkldnnPadding(
const std::vector<int64_t>& paddings) {
if (paddings.size() == 6) {
int padding_front = paddings[0];
int padding_back = paddings[1];
......
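The hunk is cut off above; as a hedged reconstruction (not verbatim from the commit), the size-6 branch plausibly splits Paddle's {front, back, top, bottom, left, right} padding into the two triples MKL-DNN expects:

```cpp
// Hedged reconstruction of the size-6 case: split a Paddle 3D padding
// vector into MKL-DNN's "left" (front/top/left) and "right"
// (back/bottom/right) padding triples.
inline std::vector<std::vector<int64_t>> ToMkldnnPaddingSketch(
    const std::vector<int64_t>& paddings) {
  return {{paddings[0], paddings[2], paddings[4]},
          {paddings[1], paddings[3], paddings[5]}};
}
```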
......@@ -15,6 +15,7 @@ limitations under the License. */
#pragma once
#include <sstream>
#include <string>
#include <type_traits>
#include <typeindex>
namespace paddle {
......@@ -24,13 +25,20 @@ inline std::ostream& operator<<(std::ostream& s, const std::type_index& t) {
return s;
}
template <typename T>
template <typename T,
typename std::enable_if<!std::is_enum<T>::value, int>::type = 0>
inline std::string to_string(T v) {
std::ostringstream sout;
sout << v;
return sout.str();
}
template <typename T,
typename std::enable_if<std::is_enum<T>::value, int>::type = 0>
inline std::string to_string(T v) {
return std::to_string(static_cast<int>(v));
}
template <>
inline std::string to_string(std::type_index t) {
return t.name();
......
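The `enable_if` split exists because enum classes (such as the new `mkldnn::memory::format_tag`) have no `operator<<`, so they are printed via their integer value instead. A small illustrative sketch (assuming the `paddle::string` namespace; the enum is hypothetical):

```cpp
// Non-enum types stream through std::ostringstream; enums cast to int.
enum class Tag : int { kBlocked = 5 };  // hypothetical enum class
std::string a = paddle::string::to_string(3.5);            // "3.5"
std::string b = paddle::string::to_string(Tag::kBlocked);  // "5"
```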
......@@ -203,14 +203,16 @@ if '${WITH_MKLDNN}' == 'ON':
# TODO(typhoonzero): use install_name_tool to patch mkl libs once
# we can support mkl on mac.
#
# change rpath of libmkldnn.so.0, add $ORIGIN/ to it.
# change rpath of libmkldnn.so.1, add $ORIGIN/ to it.
# The reason is that all thirdparty libraries in the same directory,
# thus, libmkldnn.so.0 will find libmklml_intel.so and libiomp5.so.
# thus, libmkldnn.so.1 will find libmklml_intel.so and libiomp5.so.
command = "patchelf --set-rpath '$ORIGIN/' ${MKLDNN_SHARED_LIB}"
if os.system(command) != 0:
raise Exception("patch libmkldnn.so failed, command: %s" % command)
package_data['paddle.libs']+=['libmkldnn.so.0' if os.name != 'nt' else ('mkldnn' + ext_name)]
package_data['paddle.libs']+=['libmkldnn.so.0','libmkldnn.so.1'] if os.name != 'nt' else ['mkldnn' + ext_name]
shutil.copy('${MKLDNN_SHARED_LIB}', libs_path)
if os.name != 'nt':
shutil.copy('${MKLDNN_SHARED_LIB_1}', libs_path)
if '${WITH_NGRAPH}' == 'ON':
# only change rpath in Release mode,
# since in Debug mode, nGraph lib may be too large to be changed?
......