Commit 217db273 authored by dengkaipeng

add mkldnn support. test=develop

Parent 6cb66721
@@ -110,28 +110,51 @@ class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel<T> {
                   "It must use CPUPlace.");
    auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
    auto mkldnn_engine = dev_ctx.GetEngine();
-   const Tensor* input = ctx.Input<Tensor>("X");
-   Tensor* output = ctx.Output<Tensor>("Out");
+   const Tensor* X = ctx.Input<Tensor>("X");
+   Tensor* Out = ctx.Output<Tensor>("Out");
    PADDLE_ENFORCE_EQ(
-       input->dims(), output->dims(),
+       X->dims(), Out->dims(),
        "The shape of softmax's input and output must be identical.");
+   const int axis = ctx.Attr<int>("axis");
+   int rank = X->dims().size();
    // make sure 'output' holds memory, which will be shared by
    // 'flattened_output' later.
-   output->mutable_data<T>(ctx.GetPlace());
+   Out->mutable_data<T>(ctx.GetPlace());
+   std::vector<int> perm, shape;
+   CalcTransPermAndShapeByAxis(*X, axis, &perm, &shape);
+   Tensor X_2d, Out_2d;
+   Tensor X_trans, Out_trans;
+   if (axis != -1 && axis != rank - 1) {
+     X_trans.mutable_data<T>(framework::make_ddim(shape), ctx.GetPlace());
+     Out_trans.mutable_data<T>(framework::make_ddim(shape), ctx.GetPlace());
+     TransCompute<MKLDNNDeviceContext, T>(rank, dev_ctx, *X, &X_trans, perm);
+     TransCompute<MKLDNNDeviceContext, T>(rank, dev_ctx, *Out, &Out_trans, perm);
+     X_2d = framework::ReshapeToMatrix(X_trans, rank - 1);
+     Out_2d = framework::ReshapeToMatrix(Out_trans, rank - 1);
+   } else {
+     X_2d = framework::ReshapeToMatrix(*X, rank - 1);
+     Out_2d = framework::ReshapeToMatrix(*Out, rank - 1);
+   }
    // flatten input and output to 2-D matrixs
-   auto dims = input->dims();  // input and output share the same shape
-   auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
-   framework::Tensor flattened_input;
-   framework::Tensor flattened_output;
-   flattened_input.ShareDataWith(*input).Resize(flattened_dims);
-   flattened_output.ShareDataWith(*output).Resize(flattened_dims);
-   const T* input_data = flattened_input.data<T>();
-   T* output_data = flattened_output.mutable_data<T>(ctx.GetPlace());
-   std::vector<int> src_tz = paddle::framework::vectorize2int(flattened_dims);
+   // auto dims = input->dims();  // input and output share the same shape
+   // auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
+   // framework::Tensor flattened_input;
+   // framework::Tensor flattened_output;
+   // flattened_input.ShareDataWith(*input).Resize(flattened_dims);
+   // flattened_output.ShareDataWith(*output).Resize(flattened_dims);
+   // const T* input_data = flattened_input.data<T>();
+   // T* output_data = flattened_output.mutable_data<T>(ctx.GetPlace());
+   const T* input_data = X_2d.data<T>();
+   T* output_data = Out_2d.mutable_data<T>(ctx.GetPlace());
+   // std::vector<int> src_tz = paddle::framework::vectorize2int(flattened_dims);
+   std::vector<int> src_tz = paddle::framework::vectorize2int(X_2d.dims());
    std::vector<int> dst_tz = src_tz;
    // Same memory descriptor to be used for input and output
    memory::dims softmax_tz = {src_tz[0], src_tz[1]};
@@ -178,6 +201,10 @@ class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel<T> {
          output_data[i] < threshold ? threshold : output_data[i];
      }
    }
+   if (axis != -1 && axis != rank - 1) {
+     TransCompute<MKLDNNDeviceContext, T>(rank, dev_ctx, Out_trans, Out, perm);
+   }
  }
};
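The forward kernel above handles an arbitrary `axis` by transposing that dimension to the last position, flattening to a 2-D matrix, running the MKL-DNN softmax row-wise, and transposing the result back. Below is a minimal NumPy sketch of that data movement, not the kernel itself; it assumes `CalcTransPermAndShapeByAxis` produces the "move axis to last" permutation described in the op documentation further down.

import numpy as np

def softmax_along_axis(x, axis=-1):
    rank = x.ndim
    if axis != -1 and axis != rank - 1:
        x = np.moveaxis(x, axis, -1)           # X -> X_trans (TransCompute)
    x_2d = x.reshape(-1, x.shape[-1])          # ReshapeToMatrix(rank - 1)
    e = np.exp(x_2d - x_2d.max(axis=1, keepdims=True))
    out_2d = e / e.sum(axis=1, keepdims=True)  # row-wise softmax (done by MKL-DNN)
    out = out_2d.reshape(x.shape)
    if axis != -1 and axis != rank - 1:
        out = np.moveaxis(out, -1, axis)       # Out_trans -> Out
    return out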
@@ -190,33 +217,60 @@ class SoftmaxMKLDNNGradKernel : public paddle::framework::OpKernel<T> {
    auto& dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
    auto mkldnn_engine = dev_ctx.GetEngine();
-   const Tensor* output = ctx.Input<Tensor>("Out");
-   auto* dout = ctx.template Input<Tensor>(framework::GradVarName("Out"));
-   auto* dx =
+   const Tensor* Out = ctx.Input<Tensor>("Out");
+   auto* dOut = ctx.template Input<Tensor>(framework::GradVarName("Out"));
+   auto* dX =
        ctx.template Output<framework::Tensor>(framework::GradVarName("X"));
    PADDLE_ENFORCE_EQ(
-       dout->dims(), dx->dims(),
+       dOut->dims(), dX->dims(),
        "The shape of softmax_grad's input and output must be identical.");
+   const int axis = ctx.Attr<int>("axis");
+   int rank = Out->dims().size();
    // make sure 'dx' holds memory, which will be shared by 'flattened_dx'
    // later.
-   dx->template mutable_data<T>(ctx.GetPlace());
-   auto dims = dout->dims();  // input and output share the same shape
-   auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
-   framework::Tensor flattened_output;
-   framework::Tensor flattened_dout;
-   framework::Tensor flattened_dx;
-   flattened_output.ShareDataWith(*output).Resize(flattened_dims);
-   flattened_dout.ShareDataWith(*dout).Resize(flattened_dims);
-   flattened_dx.ShareDataWith(*dx).Resize(flattened_dims);
-   const T* dst_data = flattened_output.data<T>();
-   const T* diff_dst_ptr = flattened_dout.template data<T>();
-   T* diff_src_ptr = flattened_dx.template mutable_data<T>(ctx.GetPlace());
-   std::vector<int> dst_tz = paddle::framework::vectorize2int(flattened_dims);
+   dX->template mutable_data<T>(ctx.GetPlace());
+   std::vector<int> perm, shape;
+   CalcTransPermAndShapeByAxis(*dX, axis, &perm, &shape);
+   Tensor dX_2d, Out_2d, dOut_2d;
+   Tensor dX_trans, Out_trans, dOut_trans;
+   if (axis != -1 && axis != rank - 1) {
+     dX_trans.mutable_data<T>(framework::make_ddim(shape), ctx.GetPlace());
+     Out_trans.mutable_data<T>(framework::make_ddim(shape), ctx.GetPlace());
+     dOut_trans.mutable_data<T>(framework::make_ddim(shape), ctx.GetPlace());
+     TransCompute<MKLDNNDeviceContext, T>(rank, dev_ctx, *dX, &dX_trans, perm);
+     TransCompute<MKLDNNDeviceContext, T>(rank, dev_ctx, *Out, &Out_trans, perm);
+     TransCompute<MKLDNNDeviceContext, T>(rank, dev_ctx, *dOut, &dOut_trans, perm);
+     dX_2d = framework::ReshapeToMatrix(dX_trans, rank - 1);
+     Out_2d = framework::ReshapeToMatrix(Out_trans, rank - 1);
+     dOut_2d = framework::ReshapeToMatrix(dOut_trans, rank - 1);
+   } else {
+     dX_2d = framework::ReshapeToMatrix(*dX, rank - 1);
+     Out_2d = framework::ReshapeToMatrix(*Out, rank - 1);
+     dOut_2d = framework::ReshapeToMatrix(*dOut, rank - 1);
+   }
+   // auto dims = dout->dims();  // input and output share the same shape
+   // auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
+   // framework::Tensor flattened_output;
+   // framework::Tensor flattened_dout;
+   // framework::Tensor flattened_dx;
+   // flattened_output.ShareDataWith(*output).Resize(flattened_dims);
+   // flattened_dout.ShareDataWith(*dout).Resize(flattened_dims);
+   // flattened_dx.ShareDataWith(*dx).Resize(flattened_dims);
+   // const T* dst_data = flattened_output.data<T>();
+   // const T* diff_dst_ptr = flattened_dout.template data<T>();
+   // T* diff_src_ptr = flattened_dx.template mutable_data<T>(ctx.GetPlace());
+   const T* dst_data = Out_2d.data<T>();
+   const T* diff_dst_ptr = dOut_2d.template data<T>();
+   T* diff_src_ptr = dX_2d.template mutable_data<T>(ctx.GetPlace());
+   std::vector<int> dst_tz = paddle::framework::vectorize2int(Out_2d.dims());
    std::vector<int> src_tz(dst_tz);
    // Same memory descriptor to be used for input and output
@@ -261,6 +315,10 @@ class SoftmaxMKLDNNGradKernel : public paddle::framework::OpKernel<T> {
    std::vector<primitive> pipeline{*softmax_bwd_p};
    stream(stream::kind::eager).submit(pipeline).wait();
+   if (axis != -1 && axis != rank - 1) {
+     TransCompute<MKLDNNDeviceContext, T>(rank, dev_ctx, dX_trans, dX, perm);
+   }
  }
};
}  // namespace operators
...
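The backward kernel applies the same transpose-and-flatten treatment to Out, dOut, and dX before invoking the MKL-DNN softmax_backward primitive, then transposes dX back to the original layout. For reference, the gradient that softmax backward computes on each flattened row is dX = Out * (dOut - sum(dOut * Out)); the NumPy snippet below only illustrates that formula on the 2-D views, it is not the MKL-DNN code path.

import numpy as np

def softmax_grad_2d(out_2d, dout_2d):
    # out_2d: softmax outputs, dout_2d: upstream gradients, both shaped (rows, K)
    dot = np.sum(dout_2d * out_2d, axis=1, keepdims=True)
    return out_2d * (dout_2d - dot)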
@@ -28,7 +28,6 @@ class SoftmaxCUDNNKernel : public framework::OpKernel<T> {
    auto& dev_ctx = context.template device_context<platform::CUDADeviceContext>();
    auto* X = context.Input<Tensor>("X");
    auto* Out = context.Output<Tensor>("Out");
-   // auto dims = X->dims();
    const int axis = context.Attr<int>("axis");
    int rank = X->dims().size();
...
@@ -85,10 +85,10 @@ class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
  void Make() override {
    AddInput("X",
             "The input tensor of softmax, "
-            "whose last dimension is the input_feature_dimensions.");
+            "whose :attr:`axis` dimension is the input_feature_dimensions.");
    AddOutput("Out", "The normalized values with the same shape as X.");
    AddAttr<int>("axis",
-                "The dimension of Input(x) to perform softmax,"
+                "The dimension index of Input(x) to perform softmax,"
                 "default -1 for last dimension")
        .SetDefault(-1);
    AddAttr<bool>(
@@ -115,12 +115,13 @@ Softmax Operator.
The input of the softmax operator is a tensor of any rank. The output tensor
has the same shape as the input.

-The input tensor will first be logically flattened to a 2-D matrix. The matrix's
-second dimension(row length) is as same as the last dimension of the input
+The :attr:`axis`-th dimension of the input tensor will be permuted to the last.
+Then the input tensor will be logically flattened to a 2-D matrix. The matrix's
+second dimension(row length) is the same as the :attr:`axis` dimension of the input
tensor, and the first dimension(column length) is the product of all other
dimensions of the input tensor. For each row of the matrix, the softmax operator
squashes the K-dimensional(K is the width of the matrix, which is also the size
-of the input tensor's last dimension) vector of arbitrary real values to a
+of the input tensor's :attr:`axis` dimension) vector of arbitrary real values to a
K-dimensional vector of real values in the range [0, 1] that add up to 1.
It computes the exponential of the given dimension and the sum of exponential
values of all the other dimensions in the K-dimensional vector input.
...
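A tiny worked example of the behaviour the operator documentation describes: with the softmax axis being the last dimension of a 2 x 3 input, each row of the flattened matrix is squashed into values in [0, 1] that sum to 1 (NumPy is used here purely for illustration).

import numpy as np

x = np.array([[1.0, 2.0, 3.0],
              [1.0, 1.0, 1.0]])
e = np.exp(x - x.max(axis=1, keepdims=True))   # subtract the row max for numerical stability
y = e / e.sum(axis=1, keepdims=True)
print(y)              # [[0.09003057 0.24472847 0.66524096]
                      #  [0.33333333 0.33333333 0.33333333]]
print(y.sum(axis=1))  # [1. 1.]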
@@ -1819,17 +1819,18 @@ def sequence_softmax(input, use_cudnn=False, name=None):
    return softmax_out


-def softmax(input, use_cudnn=False, name=None):
+def softmax(input, use_cudnn=False, name=None, axis=-1):
    """
    The input of the softmax operator is a tensor of any rank. The output tensor
    has the same shape as the input.

-   The input tensor will first be logically flattened to a 2-D matrix. The matrix's
-   second dimension(row length) is as same as the last dimension of the input
+   The :attr:`axis`-th dimension of the input tensor will be permuted to the last.
+   Then the input tensor will be logically flattened to a 2-D matrix. The matrix's
+   second dimension(row length) is the same as the :attr:`axis`-th dimension of the input
    tensor, and the first dimension(column length) is the product of all other
    dimensions of the input tensor. For each row of the matrix, the softmax operator
    squashes the K-dimensional(K is the width of the matrix, which is also the size
-   of the input tensor's last dimension) vector of arbitrary real values to a
+   of the input tensor's :attr:`axis`-th dimension) vector of arbitrary real values to a
    K-dimensional vector of real values in the range [0, 1] that add up to 1.
    It computes the exponential of the given dimension and the sum of exponential
@@ -1851,6 +1852,7 @@ def softmax(input, use_cudnn=False, name=None):
                                False by default. Default: False
        name (str|None): A name for this layer(optional). If set None, the layer
                         will be named automatically. Default: None.
+       axis (int): The index of the dimension to perform softmax calculation. Default: -1.

    Returns:
        Variable: output of softmax
@@ -1860,7 +1862,7 @@ def softmax(input, use_cudnn=False, name=None):
        .. code-block:: python

            fc = fluid.layers.fc(input=x, size=10)
-           softmax = fluid.layers.softmax(input=fc)
+           softmax = fluid.layers.softmax(input=fc, axis=1)

    """
    helper = LayerHelper('softmax', **locals())
@@ -1870,7 +1872,10 @@ def softmax(input, use_cudnn=False, name=None):
        type="softmax",
        inputs={"X": input},
        outputs={"Out": softmax_out},
-       attrs={"use_cudnn": use_cudnn})
+       attrs={
+           "axis": axis,
+           "use_cudnn": use_cudnn
+       })
    return softmax_out
...
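A hedged usage sketch of the new `axis` argument on a rank-3 input (the layer name and shape are illustrative only; `fluid.layers.data` prepends the batch dimension, so `axis=1` below normalizes over the dimension of size 4):

import paddle.fluid as fluid

data = fluid.layers.data(name='data', shape=[4, 5], dtype='float32')  # runtime shape: [-1, 4, 5]
probs = fluid.layers.softmax(input=data, axis=1)  # softmax over the size-4 dimension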
@@ -513,7 +513,7 @@ class TestBook(unittest.TestCase):
        with program_guard(program):
            data = layers.data(name='data', shape=[10], dtype='float32')
            hid = layers.fc(input=data, size=20)
-           self.assertIsNotNone(layers.softmax(hid))
+           self.assertIsNotNone(layers.softmax(hid, axis=1))
        print(str(program))

    def test_space_to_depth(self):
...