Commit 365e6cfd authored by dengkaipeng

add mkldnn support. test=develop

Parent 217db273
......@@ -86,7 +86,7 @@ paddle.fluid.layers.conv2d (ArgSpec(args=['input', 'num_filters', 'filter_size',
paddle.fluid.layers.conv3d (ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, None, None)), ('document', '37042620f9bd3a2da6e5d3138b2f724b'))
paddle.fluid.layers.sequence_pool (ArgSpec(args=['input', 'pool_type', 'is_test'], varargs=None, keywords=None, defaults=(False,)), ('document', 'a194fb80614023f543df3949fbd0d0b8'))
paddle.fluid.layers.sequence_softmax (ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(False, None)), ('document', '19ef6f9cdd27feac8a1ae060f19c10b4'))
-paddle.fluid.layers.softmax (ArgSpec(args=['input', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(False, None)), ('document', 'f19dd380864e61134ce3814e4be0de4b'))
+paddle.fluid.layers.softmax (ArgSpec(args=['input', 'use_cudnn', 'name', 'axis'], varargs=None, keywords=None, defaults=(False, None, -1)), ('document', 'f19dd380864e61134ce3814e4be0de4b'))
paddle.fluid.layers.pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True)), ('document', 'bbd84e855e660cd1084bb71a2fd0cdaa'))
paddle.fluid.layers.pool3d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'name', 'exclusive'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, None, True)), ('document', '043de7333b79ee0ac55053c14ed81625'))
paddle.fluid.layers.adaptive_pool2d (ArgSpec(args=['input', 'pool_size', 'pool_type', 'require_index', 'name'], varargs=None, keywords=None, defaults=('max', False, None)), ('document', '859b887174d06f361658f69cb7c06d95'))
......
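The API.spec change above registers a new `axis` argument on fluid.layers.softmax, defaulting to -1 (the last dimension). A minimal usage sketch; the layer name and shape here are illustrative, not from this commit:

    import paddle.fluid as fluid

    # Softmax over a non-last dimension; with MKL-DNN enabled on CPU this
    # exercises the transpose-then-flatten path added by this commit.
    x = fluid.layers.data(name='x', shape=[3, 4, 5], dtype='float32')
    out = fluid.layers.softmax(x, axis=1)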
......@@ -131,29 +131,22 @@ class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel<T> {
if (axis != -1 && axis != rank - 1) {
X_trans.mutable_data<T>(framework::make_ddim(shape), ctx.GetPlace());
Out_trans.mutable_data<T>(framework::make_ddim(shape), ctx.GetPlace());
-TransCompute<MKLDNNDeviceContext, T>(rank, dev_ctx, *X, &X_trans, perm);
-TransCompute<MKLDNNDeviceContext, T>(rank, dev_ctx, *Out, &Out_trans, perm);
-X_2d = framework::ReshapeToMatrix(X_trans, rank - 1);
-Out_2d = framework::ReshapeToMatrix(Out_trans, rank - 1);
+TransCompute<platform::CPUDeviceContext, T>(rank, dev_ctx, *X, &X_trans, perm);
+TransCompute<platform::CPUDeviceContext, T>(rank, dev_ctx, *Out, &Out_trans, perm);
+auto dims = X_trans.dims();
+auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
+X_2d.ShareDataWith(X_trans).Resize(flattened_dims);
+Out_2d.ShareDataWith(Out_trans).Resize(flattened_dims);
} else {
-X_2d = framework::ReshapeToMatrix(*X, rank - 1);
-Out_2d = framework::ReshapeToMatrix(*Out, rank - 1);
+auto dims = X->dims();
+auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
+X_2d.ShareDataWith(*X).Resize(flattened_dims);
+Out_2d.ShareDataWith(*Out).Resize(flattened_dims);
}
-// flatten input and output to 2-D matrixs
-// auto dims = input->dims(); // input and output share the same shape
-// auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
-// framework::Tensor flattened_input;
-// framework::Tensor flattened_output;
-// flattened_input.ShareDataWith(*input).Resize(flattened_dims);
-// flattened_output.ShareDataWith(*output).Resize(flattened_dims);
-// const T* input_data = flattened_input.data<T>();
-// T* output_data = flattened_output.mutable_data<T>(ctx.GetPlace());
const T* input_data = X_2d.data<T>();
T* output_data = Out_2d.mutable_data<T>(ctx.GetPlace());
-// std::vector<int> src_tz = paddle::framework::vectorize2int(flattened_dims);
std::vector<int> src_tz = paddle::framework::vectorize2int(X_2d.dims());
std::vector<int> dst_tz = src_tz;
// Same memory descriptor to be used for input and output
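The hunk above replaces ReshapeToMatrix with an explicit flatten_to_2d + ShareDataWith after transposing the softmax axis to the last position. A NumPy sketch of the same strategy (assuming, as the code suggests, that `perm` simply swaps `axis` with the last dimension):

    import numpy as np

    def softmax_along_axis(x, axis=-1):
        rank = x.ndim
        perm = list(range(rank))
        perm[axis], perm[rank - 1] = perm[rank - 1], perm[axis]  # move `axis` last
        x_t = np.transpose(x, perm)
        x_2d = x_t.reshape(-1, x_t.shape[-1])          # flatten_to_2d equivalent
        x_2d = x_2d - x_2d.max(axis=1, keepdims=True)  # shift for numerical stability
        e = np.exp(x_2d)
        out_2d = e / e.sum(axis=1, keepdims=True)
        return np.transpose(out_2d.reshape(x_t.shape), perm)  # same perm undoes it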
......@@ -184,10 +177,16 @@ class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel<T> {
// We cannot use softmax_dst_memory_p to get prim desc as
// it contains flattened dims (2D) while output tensor can
// have 2,3,4+ dims
+if (axis != -1 && axis != rank - 1) {
auto output_mem_pd = paddle::platform::create_prim_desc_from_dims(
-paddle::framework::vectorize2int(output->dims()),
+shape, mkldnn::memory::format::blocked);
+Out_trans.set_mkldnn_prim_desc(output_mem_pd);
+} else {
+auto output_mem_pd = paddle::platform::create_prim_desc_from_dims(
+paddle::framework::vectorize2int(Out->dims()),
mkldnn::memory::format::blocked);
-output->set_mkldnn_prim_desc(output_mem_pd);
+Out->set_mkldnn_prim_desc(output_mem_pd);
+}
std::vector<primitive> pipeline{
*(static_cast<softmax_forward::primitive*>(softmax_p.get()))};
......@@ -203,7 +202,7 @@ class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel<T> {
}
if (axis != -1 && axis != rank - 1) {
-TransCompute<MKLDNNDeviceContext, T>(rank, dev_ctx, Out_trans, Out, perm);
+TransCompute<platform::CPUDeviceContext, T>(rank, dev_ctx, Out_trans, Out, perm);
}
}
};
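The kernel reuses the same `perm` to transpose the result back (the TransCompute call just above). That works because swapping one pair of dimensions is its own inverse; a quick check, under the same swap-`axis`-with-last assumption:

    import numpy as np

    x = np.random.rand(2, 3, 4, 5)
    axis, rank = 1, x.ndim
    perm = list(range(rank))
    perm[axis], perm[rank - 1] = perm[rank - 1], perm[axis]
    # applying the swap twice restores the original layout
    assert np.array_equal(np.transpose(np.transpose(x, perm), perm), x)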
......@@ -242,30 +241,22 @@ class SoftmaxMKLDNNGradKernel : public paddle::framework::OpKernel<T> {
dX_trans.mutable_data<T>(framework::make_ddim(shape), ctx.GetPlace());
Out_trans.mutable_data<T>(framework::make_ddim(shape), ctx.GetPlace());
dOut_trans.mutable_data<T>(framework::make_ddim(shape), ctx.GetPlace());
-TransCompute<MKLDNNDeviceContext, T>(rank, dev_ctx, *dX, &dX_trans, perm);
-TransCompute<MKLDNNDeviceContext, T>(rank, dev_ctx, *Out, &Out_trans, perm);
-TransCompute<MKLDNNDeviceContext, T>(rank, dev_ctx, *dOut, &dOut_trans, perm);
-dX_2d = framework::ReshapeToMatrix(dX_trans, rank - 1);
-Out_2d = framework::ReshapeToMatrix(Out_trans, rank - 1);
-dOut_2d = framework::ReshapeToMatrix(dOut_trans, rank - 1);
+TransCompute<platform::CPUDeviceContext, T>(rank, dev_ctx, *dX, &dX_trans, perm);
+TransCompute<platform::CPUDeviceContext, T>(rank, dev_ctx, *Out, &Out_trans, perm);
+TransCompute<platform::CPUDeviceContext, T>(rank, dev_ctx, *dOut, &dOut_trans, perm);
+auto dims = dX_trans.dims();
+auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
+dX_2d.ShareDataWith(dX_trans).Resize(flattened_dims);
+Out_2d.ShareDataWith(Out_trans).Resize(flattened_dims);
+dOut_2d.ShareDataWith(dOut_trans).Resize(flattened_dims);
} else {
-dX_2d = framework::ReshapeToMatrix(*dX, rank - 1);
-Out_2d = framework::ReshapeToMatrix(*Out, rank - 1);
-dOut_2d = framework::ReshapeToMatrix(*dOut, rank - 1);
+auto dims = dX->dims();
+auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
+dX_2d.ShareDataWith(*dX).Resize(flattened_dims);
+Out_2d.ShareDataWith(*Out).Resize(flattened_dims);
+dOut_2d.ShareDataWith(*dOut).Resize(flattened_dims);
}
-// auto dims = dout->dims(); // input and output share the same shape
-// auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
-// framework::Tensor flattened_output;
-// framework::Tensor flattened_dout;
-// framework::Tensor flattened_dx;
-// flattened_output.ShareDataWith(*output).Resize(flattened_dims);
-// flattened_dout.ShareDataWith(*dout).Resize(flattened_dims);
-// flattened_dx.ShareDataWith(*dx).Resize(flattened_dims);
-// const T* dst_data = flattened_output.data<T>();
-// const T* diff_dst_ptr = flattened_dout.template data<T>();
-// T* diff_src_ptr = flattened_dx.template mutable_data<T>(ctx.GetPlace());
const T* dst_data = Out_2d.data<T>();
const T* diff_dst_ptr = dOut_2d.template data<T>();
T* diff_src_ptr = dX_2d.template mutable_data<T>(ctx.GetPlace());
......@@ -317,7 +308,7 @@ class SoftmaxMKLDNNGradKernel : public paddle::framework::OpKernel<T> {
stream(stream::kind::eager).submit(pipeline).wait();
if (axis != -1 && axis != rank - 1) {
-TransCompute<MKLDNNDeviceContext, T>(rank, dev_ctx, dX_trans, dX, perm);
+TransCompute<platform::CPUDeviceContext, T>(rank, dev_ctx, dX_trans, dX, perm);
}
}
};
......
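The grad kernel above applies the same transpose-to-last-axis trick, then computes the standard softmax backward rule on the flattened 2-D views: dX = (dOut - sum(dOut * Out, over the softmax axis)) * Out. A NumPy sketch of that math under the same swap-`axis` assumption (a reference, not the MKL-DNN primitive itself):

    import numpy as np

    def softmax_grad_along_axis(out, dout, axis=-1):
        rank = out.ndim
        perm = list(range(rank))
        perm[axis], perm[rank - 1] = perm[rank - 1], perm[axis]
        out_t = np.transpose(out, perm)
        dout_t = np.transpose(dout, perm)
        o2 = out_t.reshape(-1, out_t.shape[-1])
        d2 = dout_t.reshape(-1, out_t.shape[-1])
        # dX = (dOut - sum(dOut * Out, row)) * Out, row-wise on the 2-D views
        dx2 = (d2 - (d2 * o2).sum(axis=1, keepdims=True)) * o2
        return np.transpose(dx2.reshape(out_t.shape), perm)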
......@@ -32,6 +32,30 @@ class TestSoftmaxMKLDNNOp2(TestSoftmaxMKLDNNOp):
        return [2, 3, 4, 5]

+class TestSoftmaxMKLDNNOp3(TestSoftmaxMKLDNNOp):
+    def get_x_shape(self):
+        return [2, 3, 4, 5]
+
+    def get_axis(self):
+        return 0
+
+
+class TestSoftmaxMKLDNNOp4(TestSoftmaxMKLDNNOp):
+    def get_x_shape(self):
+        return [2, 3, 4, 5]
+
+    def get_axis(self):
+        return 1
+
+
+class TestSoftmaxMKLDNNOp5(TestSoftmaxMKLDNNOp):
+    def get_x_shape(self):
+        return [2, 3, 4, 5]
+
+    def get_axis(self):
+        return 2
+
# Check if primitives already exist in backward
class TestSoftmaxMKLDNNPrimitivesAlreadyExist(unittest.TestCase):
    def setUp(self):
......
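Each new test case only overrides the get_x_shape/get_axis hooks; the base class builds the inputs and the expected output. A sketch of the reference the tests presumably compare against (a numerically stable softmax applied along the chosen axis):

    import numpy as np

    def stable_softmax(x):
        shiftx = x - np.max(x)  # shift by the max for numerical stability
        exps = np.exp(shiftx)
        return exps / np.sum(exps)

    x = np.random.uniform(0.1, 1.0, [2, 3, 4, 5]).astype('float32')
    out = np.apply_along_axis(stable_softmax, 1, x)  # the axis=1 case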
......@@ -131,13 +131,23 @@ class TestSoftmaxCUDNNOp3(TestSoftmaxCUDNNOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]

+    def get_axis(self):
+        return 0
+
+
+@unittest.skipIf(not core.is_compiled_with_cuda(),
+                 "core is not compiled with CUDA")
+class TestSoftmaxCUDNNOp4(TestSoftmaxCUDNNOp):
+    def get_x_shape(self):
+        return [2, 3, 4, 5]
+
+    def get_axis(self):
+        return 1
+
+
@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
-class TestSoftmaxCUDNNOp2(TestSoftmaxCUDNNOp):
+class TestSoftmaxCUDNNOp5(TestSoftmaxCUDNNOp):
    def get_x_shape(self):
        return [2, 3, 4, 5]
......