Unverified commit 2a3d75bc, authored by YangQun, committed by GitHub

[Zero-Dim] Support 0-D tensor for some oneDNN unary kernels (#51687)

* support 0-d tensor for element wise unary ops

* fix python code style check

* fix approval check

* support 0-d tensor for onednn softmax and logsoftmax kernels

* fix comments

* fix some unittests
Parent 31f81685
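For context, a minimal sketch (not part of the diff) of what a 0-D tensor looks like on the Python/NumPy side: it has an empty shape and rank 0, which is exactly the case the oneDNN kernels below need to map onto a rank-1 `[1]` descriptor.

```python
import numpy as np

# A 0-D ("zero-dim") tensor: empty shape, rank 0, exactly one element.
x = np.random.uniform(-1, 1, []).astype(np.float32)
assert x.shape == () and x.ndim == 0

# A unary op such as ReLU must preserve the 0-D shape.
out = np.maximum(x, 0)
assert out.shape == ()
```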
@@ -60,9 +60,6 @@ void TransformData(const phi::KernelKey &expected_kernel_type,
   if (lin != DataLayout::ONEDNN && lout == DataLayout::ONEDNN) {
     // Case1 - transform from Non-ONEDNN OPKernel to ONEDNN OPKernel
     // Just set layout/format. No real transform occur
-    auto out_format = phi::funcs::OneDNNFormatForSize(
-        in.dims().size(), phi::funcs::ToOneDNNFormat(lin));
     out.ShareDataWith(input_tensor);
     // For NHWC data we need reshape of tensors as MKL-DNN
     // is expecting NHWC dims description order
@@ -72,10 +69,9 @@ void TransformData(const phi::KernelKey &expected_kernel_type,
       // NHWC or NCHW
       phi::OneDNNContext::tls().set_cur_paddle_data_layout(lin);
     }
-    dnnl::memory::desc out_mem_desc(
-        vectorize(out.dims()),
-        phi::funcs::ToOneDNNDataType(in.dtype()),
-        out_format);
+    dnnl::memory::desc out_mem_desc =
+        phi::funcs::make_memory_desc(out, lin);
     out.set_mem_desc(out_mem_desc);
   } else {
     // Case2 - transfrom from ONEDNN OPKernel to Non-ONEDNN OPKernel
......
@@ -36,7 +36,9 @@ void* to_void_cast(const Type* t) {
 inline OneDNNMemoryFormat OneDNNFormatForSize(size_t dims_size,
                                               OneDNNMemoryFormat data_format) {
-  if (dims_size == 1) {
+  if (dims_size == 0) {
+    return OneDNNMemoryFormat::x;
+  } else if (dims_size == 1) {
     return OneDNNMemoryFormat::x;
   } else if (dims_size == 2) {
     return OneDNNMemoryFormat::nc;
......
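The change above is the core of the kernel-side fix: rank 0 gets the same 1-D oneDNN format tag as rank 1, because oneDNN has no rank-0 memory format. Below is a hedged Python mirror of that dispatch; `onednn_format_for_size` and the string tags are illustrative stand-ins for the C++ helper and the `OneDNNMemoryFormat` enum, and the higher-rank branches of the real helper are elided.

```python
def onednn_format_for_size(dims_size: int, data_format: str) -> str:
    """Illustrative mirror of OneDNNFormatForSize: pick a format tag from the rank."""
    if dims_size == 0:    # 0-D tensor: described to oneDNN as a 1-D [1] buffer
        return "x"
    elif dims_size == 1:
        return "x"
    elif dims_size == 2:
        return "nc"
    # Ranks >= 3 are handled by further branches in the real helper (elided here).
    return data_format

assert onednn_format_for_size(0, "nchw") == onednn_format_for_size(1, "nchw") == "x"
```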
@@ -778,7 +778,8 @@ class SoftmaxOneDNNHandler
         errors::InvalidArgument(
             "The shape of input and output tensor must be identical."));
-    const int canonical_axis = funcs::CanonicalAxis(axis, x->dims().size());
+    int rank = x->dims().size() != 0 ? x->dims().size() : 1;
+    const int canonical_axis = funcs::CanonicalAxis(axis, rank);
     this->AcquireForwardPrimitiveDescriptor(
         dnnl::prop_kind::forward_scoring, x->mem_desc(), canonical_axis);
   }
@@ -792,8 +793,8 @@ class SoftmaxOneDNNHandler
                                 dnnl::softmax_forward,
                                 dnnl::softmax_backward>(onednn_engine,
                                                         cpu_place) {
-    const int canonical_axis =
-        funcs::CanonicalAxis(axis, out_grad->dims().size());
+    int rank = out_grad->dims().size() != 0 ? out_grad->dims().size() : 1;
+    const int canonical_axis = funcs::CanonicalAxis(axis, rank);
     this->AcquireForwardPrimitiveDescriptor(
         dnnl::prop_kind::forward_scoring, out->mem_desc(), canonical_axis);
     this->AcquireBackwardPrimitiveDescriptor(
@@ -1648,7 +1649,13 @@ class SoftplusOneDNNHandler : public OneDNNHandlerNoCachingT<T, dnnl::binary> {
     dnnl::primitive_attr attrs;
     attrs.set_post_ops(post_ops);
-    auto x_tz = phi::vectorize(x->dims());
+    // If x is a 0-D tensor, x->dims() is [] while x->mem_desc().dims() is [1];
+    // use the latter, since oneDNN does not support 0-D shapes.
+    // Otherwise x->dims() == x->mem_desc().dims(), so x->mem_desc().dims()
+    // can be used directly in both cases.
+    auto x_tz = x->mem_desc().dims();
     auto beta_tz = std::vector<int64_t>(x_tz.size(), 1);
     auto beta_md = dnnl::memory::desc(
         beta_tz, OneDNNGetDataType<T>(), GetPlainOneDNNFormat(x_tz.size()));
......
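The `rank = dims.size() != 0 ? dims.size() : 1` pattern above feeds the axis canonicalization: treating a 0-D input as rank 1 lets `axis = -1` resolve to axis 0 of the `[1]`-shaped descriptor. A hedged sketch of that arithmetic, with `canonical_axis` as an illustrative stand-in for `funcs::CanonicalAxis`:

```python
def canonical_axis(axis: int, rank: int) -> int:
    # 0-D inputs are handled as rank-1, matching the kernels above.
    rank = rank if rank != 0 else 1
    return axis + rank if axis < 0 else axis

assert canonical_axis(-1, 0) == 0   # 0-D softmax: the only axis
assert canonical_axis(-1, 4) == 3   # usual negative-axis wrap-around
```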
@@ -51,6 +51,27 @@ void* GetDataFromTensor(const DenseTensor& tensor,
   }
 }
 
+// This helper constructs a dnnl memory descriptor from a reference dense
+// tensor and a target layout. For the 0-D tensor case, it builds a 1-D memory
+// descriptor with shape [1], since oneDNN does not support 0-D shapes yet.
+dnnl::memory::desc make_memory_desc(const phi::DenseTensor& ref_tensor,
+                                    phi::DataLayout target_layout) {
+  auto ref_dims = vectorize<int64_t>(ref_tensor.dims());
+  auto ref_type = ToOneDNNDataType(ref_tensor.dtype());
+  PADDLE_ENFORCE_NE(ref_type,
+                    OneDNNDataType::undef,
+                    errors::InvalidArgument(
+                        "Ref tensor type (%s) is not supported by oneDNN.",
+                        ref_tensor.dtype()));
+  auto md_dims = ref_dims.size() != 0 ? ref_dims : std::vector<int64_t>{1};
+  auto md_format =
+      OneDNNFormatForSize(md_dims.size(), ToOneDNNFormat(target_layout));
+  dnnl::memory::desc md(md_dims, ref_type, md_format);
+  return md;
+}
+
 void TransDataLayoutFromOneDNN(DataLayout in_layout,
                                DataLayout out_layout,
                                const DenseTensor& in,
@@ -64,19 +85,7 @@ void TransDataLayoutFromOneDNN(DataLayout in_layout,
   auto* dev_ctx = dynamic_cast<OneDNNContext*>(pool.Get(place));
   auto& cpu_engine = dev_ctx->GetEngine();
 
-  auto in_tz = vectorize<int64_t>(in.dims());
-  auto out_tz = in_tz;
-
-  auto in_type = ToOneDNNDataType(in.dtype());
-  PADDLE_ENFORCE_NE(
-      in_type,
-      OneDNNDataType::undef,
-      errors::InvalidArgument("Input tensor type (%s) is not supported.",
-                              in.dtype()));
-
-  auto out_format =
-      OneDNNFormatForSize(in_tz.size(), ToOneDNNFormat(out_layout));
-  dnnl::memory::desc out_mem_desc(out_tz, in_type, out_format);
+  dnnl::memory::desc out_mem_desc = make_memory_desc(in, out_layout);
 
   // output tensor has the same dims as input. Reorder don't change dims
   out->set_mem_desc(out_mem_desc);
@@ -85,6 +94,8 @@ void TransDataLayoutFromOneDNN(DataLayout in_layout,
   // Note(0x45f): Using initialized() to support slice Tensors
   // with shapes like [0, 0, 0].
   if (in.initialized() && ((in.mem_desc() != out->mem_desc()) || always_copy)) {
+    auto in_tz = vectorize<int64_t>(in.dims());
+    auto in_type = ToOneDNNDataType(in.dtype());
     void* in_data = GetDataFromTensor(in, in_type);
     ReorderOneDNNHandler handler(in_tz, in.dtype(), in_type, cpu_engine);
......
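The only 0-D-specific step in `make_memory_desc` is padding the dims to `[1]` before choosing a format tag; the data type check and format selection are unchanged. A small hedged illustration of that shape rule (`memory_desc_dims` is an illustrative name, not part of the commit):

```python
def memory_desc_dims(ref_dims: list) -> list:
    # oneDNN memory descriptors cannot be rank-0, so [] becomes [1].
    return ref_dims if len(ref_dims) != 0 else [1]

assert memory_desc_dims([]) == [1]           # 0-D tensor
assert memory_desc_dims([8, 16]) == [8, 16]  # higher ranks unchanged
```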
@@ -85,6 +85,9 @@ void TransDataLayoutFromOneDNN(DataLayout in_layout,
                                bool always_copy = false);
 
 void* GetDataFromTensor(const DenseTensor& tensor, OneDNNDataType type);
+
+dnnl::memory::desc make_memory_desc(const phi::DenseTensor& ref_tensor,
+                                    phi::DataLayout target_layout);
 #endif
 
 }  // namespace funcs
......
@@ -32,8 +32,10 @@ class LogSoftmaxOneDNNHandler
                          const int axis)
       : funcs::OneDNNHandlerNoCachingT<T, dnnl::logsoftmax_forward>(
             onednn_engine, cpu_place) {
+    const int rank = x.dims().size() != 0 ? x.dims().size() : 1;
+    const int canonical_axis = funcs::CanonicalAxis(axis, rank);
     this->AcquireForwardPrimitiveDescriptor(
-        dnnl::prop_kind::forward_inference, x.mem_desc(), axis);
+        dnnl::prop_kind::forward_inference, x.mem_desc(), canonical_axis);
   }
 };
@@ -43,7 +45,6 @@ void LogSoftmaxKernel(const Context& dev_ctx,
                       int axis,
                       DenseTensor* out) {
   const auto& onednn_engine = dev_ctx.GetEngine();
-  axis = axis >= 0 ? axis : x.dims().size() + axis;
 
   LogSoftmaxOneDNNHandler<T> handler(
       onednn_engine, dev_ctx.GetPlace(), x, axis);
......
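With the canonical axis in place, (log_)softmax over a 0-D input reduces over its single element, so softmax returns 1 and log_softmax returns 0. This is the same reference value the updated log_softmax unit test below uses (`np.array(0.0)`); a quick hedged check of that arithmetic:

```python
import numpy as np

x = np.float32(2.5)              # any 0-D value
softmax = np.exp(x) / np.exp(x)  # single-element softmax is exactly 1
assert softmax == np.float32(1.0)
assert np.log(softmax) == np.float32(0.0)  # log_softmax of a 0-D input is 0
```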
@@ -136,9 +136,6 @@ void TransferLayoutMKLDNN(const Context& dev_ctx,
   if (src_layout != DataLayout::ONEDNN && dst_layout == DataLayout::ONEDNN) {
     // Case1 - transform from Non-MKLDNN OPKernel to MKLDNN OPKernel
     // Just set layout/format. No real transform occur
-    auto out_format = funcs::OneDNNFormatForSize(
-        x.dims().size(), funcs::ToOneDNNFormat(src_layout));
     out->ShareDataWith(x);
     // For NHWC data we need reshape of tensors as MKL-DNN
     // is expecting NHWC dims description order
@@ -148,9 +145,7 @@ void TransferLayoutMKLDNN(const Context& dev_ctx,
       OneDNNContext::tls().set_cur_paddle_data_layout(src_layout);
     }
-    dnnl::memory::desc out_mem_desc(vectorize<int64_t>(out->dims()),
-                                    funcs::ToOneDNNDataType(x.dtype()),
-                                    out_format);
+    dnnl::memory::desc out_mem_desc = funcs::make_memory_desc(*out, src_layout);
     out->set_mem_desc(out_mem_desc);
   } else if (src_layout == DataLayout::ONEDNN &&
              dst_layout != DataLayout::ONEDNN) {
......
@@ -24,15 +24,27 @@ import paddle.nn.functional as F
 from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
 from paddle.fluid.tests.unittests.test_activation_op import (
     TestAbs,
+    TestAbs_ZeroDim,
     TestActivation,
+    TestActivation_ZeroDim,
     TestHardSwish,
+    TestHardSwish_ZeroDim,
     TestLeakyRelu,
+    TestLeakyRelu_ZeroDim,
     TestRelu,
     TestRelu6,
+    TestRelu6_ZeroDim,
+    TestRelu_ZeroDim,
     TestSigmoid,
+    TestSigmoid_ZeroDim,
+    TestSoftplus,
+    TestSoftplus_ZeroDim,
     TestSqrt,
+    TestSqrt_ZeroDim,
     TestSwish,
+    TestSwish_ZeroDim,
     TestTanh,
+    TestTanh_ZeroDim,
 )
 from paddle.fluid.tests.unittests.test_gelu_op import gelu
@@ -47,6 +59,16 @@ class TestMKLDNNReluDim2(TestRelu):
         self.dtype = np.float32
 
 
+class TestMKLDNNRelu_ZeroDim(TestRelu_ZeroDim):
+    def setUp(self):
+        super().setUp()
+        self.attrs = {"use_mkldnn": True}
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+
 class TestMKLDNNRelu6Dim2(TestRelu6):
     def setUp(self):
         super().setUp()
@@ -56,6 +78,15 @@ class TestMKLDNNRelu6Dim2(TestRelu6):
         self.dtype = np.float32
 
 
+class TestMKLDNNRelu6_ZeroDim(TestRelu6_ZeroDim):
+    def setUp(self):
+        super().setUp()
+        self.attrs.update({"use_mkldnn": True})
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+
 class TestMKLDNNLeakyReluDim2(TestLeakyRelu):
     def setUp(self):
         super().setUp()
@@ -74,6 +105,16 @@ class TestMKLDNNLeakyReluDim2(TestLeakyRelu):
         self.check_grad(['X'], 'Out', check_dygraph=False)
 
 
+class TestMKLDNNLeakyRelu_ZeroDim(TestLeakyRelu_ZeroDim):
+    def setUp(self):
+        super().setUp()
+        self.attrs = {"use_mkldnn": True}
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+
 class TestMKLDNNGeluDim2(TestActivation):
     def setUp(self):
         self.op_type = "gelu"
@@ -88,6 +129,20 @@ class TestMKLDNNGeluDim2(TestActivation):
         self.attrs = {"use_mkldnn": True}
 
 
+class TestMKLDNNGelu_ZeroDim(TestActivation_ZeroDim):
+    def setUp(self):
+        self.op_type = "gelu"
+        self.python_api = F.gelu
+        self.dtype = np.float32
+        x = np.random.uniform(-1, 1, []).astype(self.dtype)
+        out = gelu(x, False)
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.outputs = {'Out': out}
+        self.attrs = {"use_mkldnn": True}
+
+
 class TestMKLDNNGeluDim2Approx(TestActivation):
     def setUp(self):
         self.op_type = "gelu"
@@ -112,6 +167,16 @@ class TestMKLDNNTanhDim2(TestTanh):
         self.dtype = np.float32
 
 
+class TestMKLDNNTanh_ZeroDim(TestTanh_ZeroDim):
+    def setUp(self):
+        super().setUp()
+        self.attrs = {"use_mkldnn": True}
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+
 class TestMKLDNNSqrtDim2(TestSqrt):
     def setUp(self):
         super().setUp()
@@ -122,6 +187,16 @@ class TestMKLDNNSqrtDim2(TestSqrt):
         self.dtype = np.float32
 
 
+class TestMKLDNNSqrt_ZeroDim(TestSqrt_ZeroDim):
+    def setUp(self):
+        super().setUp()
+        self.attrs = {"use_mkldnn": True}
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+
 class TestMKLDNNAbsDim2(TestAbs):
     def setUp(self):
         super().setUp()
@@ -131,6 +206,15 @@ class TestMKLDNNAbsDim2(TestAbs):
         self.dtype = np.float32
 
 
+class TestMKLDNNAbs_ZeroDim(TestAbs_ZeroDim):
+    def setUp(self):
+        super().setUp()
+        self.attrs = {"use_mkldnn": True}
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+
 class TestMKLDNNSwishDim2(TestSwish):
     def setUp(self):
         super().setUp()
@@ -142,18 +226,41 @@ class TestMKLDNNSwishDim2(TestSwish):
         self.dtype = np.float32
 
 
+class TestMKLDNNSwish_ZeroDim(TestSwish_ZeroDim):
+    def setUp(self):
+        super().setUp()
+        self.attrs["use_mkldnn"] = True
+        self.check_eager = False
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+
 class TestMKLDNNHardSwishDim2(TestHardSwish):
     def setUp(self):
         super().setUp()
         self.attrs = {"use_mkldnn": True}
 
 
+class TestMKLDNNHardSwish_ZeroDim(TestHardSwish_ZeroDim):
+    def setUp(self):
+        super().setUp()
+        self.attrs = {"use_mkldnn": True}
+
+
 class TestMKLDNNSigmoidDim2(TestSigmoid):
     def setUp(self):
         super().setUp()
         self.attrs = {"use_mkldnn": True}
 
 
+class TestMKLDNNSigmoid_ZeroDim(TestSigmoid_ZeroDim):
+    def setUp(self):
+        super().setUp()
+        self.attrs = {"use_mkldnn": True}
+
+
 class TestMKLDNNReluDim4(TestRelu):
     def setUp(self):
         super().setUp()
@@ -376,6 +483,20 @@ class TestMKLDNNMish(TestActivation):
         self.attrs = {"use_mkldnn": True}
 
 
+class TestMKLDNNMish_ZeroDim(TestActivation_ZeroDim):
+    def setUp(self):
+        self.op_type = "mish"
+        self.python_api = F.mish
+        self.dtype = np.float32
+        x = np.random.uniform(0.1, 1, []).astype(self.dtype)
+        out = x * np.tanh(np.log(1 + np.exp(x)))
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.outputs = {'Out': out}
+        self.attrs = {"use_mkldnn": True}
+
+
 class TestMKLDNNRound(TestActivation):
     def setUp(self):
         self.op_type = "round"
@@ -388,6 +509,18 @@ class TestMKLDNNRound(TestActivation):
         self.attrs = {"use_mkldnn": True}
 
 
+class TestMKLDNNRound_ZeroDim(TestActivation_ZeroDim):
+    def setUp(self):
+        self.op_type = "round"
+        self.python_api = paddle.round
+        x = np.random.uniform(0.1, 1, []).astype(np.float32)
+        out = np.round(x)
+
+        self.inputs = {'X': x}
+        self.outputs = {'Out': out}
+        self.attrs = {"use_mkldnn": True}
+
+
 class TestMKLDNNSigmoidDim4(TestSigmoid):
     def setUp(self):
         super().setUp()
@@ -418,6 +551,25 @@ class TestMKLDNNEluDefaultAlpha(TestActivation):
         self.alpha = 1.0
 
 
+class TestMKLDNNEluDefaultAlpha_ZeroDim(TestActivation_ZeroDim):
+    def setUp(self):
+        self.op_type = "elu"
+        self.python_api = F.elu
+        self.set_alpha()
+        x = np.random.random(()).astype("float32")
+
+        self.inputs = {'X': x}
+        self.attrs = {'use_mkldnn': True, 'alpha': self.alpha}
+        self.outputs = {
+            'Out': np.maximum(0, x)
+            + np.minimum(0, self.alpha * (np.exp(x) - 1))
+        }
+
+    def set_alpha(self):
+        self.alpha = 1.0
+
+
 class TestMKLDNNEluCustomAlpha(TestMKLDNNEluDefaultAlpha):
     def set_alpha(self):
         self.alpha = 2.5
@@ -434,6 +586,17 @@ class TestMKLDNNExpOp(TestActivation):
         self.outputs = {'Out': np.exp(x)}
 
 
+class TestMKLDNNExpOp_ZeroDim(TestActivation_ZeroDim):
+    def setUp(self):
+        self.op_type = "exp"
+        self.python_api = paddle.exp
+        x = np.random.random(()).astype("float32")
+
+        self.inputs = {'X': x}
+        self.attrs = {'use_mkldnn': True}
+        self.outputs = {'Out': np.exp(x)}
+
+
 # Check if primitives already exist in backward
 class TestMKLDNNAbsPrimitivesAlreadyExist(unittest.TestCase):
     def setUp(self):
@@ -458,5 +621,23 @@ class TestMKLDNNAbsPrimitivesAlreadyExist(unittest.TestCase):
         )
 
 
+class TestMKLDNNSoftplusDim2(TestSoftplus):
+    def setUp(self):
+        super().setUp()
+        self.attrs.update({"use_mkldnn": True})
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+
+class TestMKLDNNSoftplus_ZeroDim(TestSoftplus_ZeroDim):
+    def setUp(self):
+        super().setUp()
+        self.attrs.update({"use_mkldnn": True})
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+
 if __name__ == '__main__':
     unittest.main()
@@ -26,10 +26,11 @@ from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
 )
 class TestCastBF16ToFP32MKLDNNOp(OpTest):
     def init_data(self):
-        self.out = np.random.random(size=[10, 10]).astype("float32")
+        self.out = np.random.random(size=self.shape).astype("float32")
         self.x = convert_float_to_uint16(self.out)
 
     def setUp(self):
+        self.init_shape()
        self.init_data()
         self.inputs = {'X': self.x}
         self.outputs = {'Out': self.out}
@@ -58,6 +59,9 @@ class TestCastBF16ToFP32MKLDNNOp(OpTest):
             user_defined_grad_outputs=[self.outputs['Out']],
         )
 
+    def init_shape(self):
+        self.shape = [10, 10]
+
 
 class TestCastFP32ToBF16MKLDNNOp(TestCastBF16ToFP32MKLDNNOp):
     def init_data(self):
@@ -77,6 +81,11 @@ class TestCastFP32ToFP32MKLDNNOp(TestCastBF16ToFP32MKLDNNOp):
         self.out = self.x
 
 
+class TestCastBF16ToFP32MKLDNNOp_ZeroDim(TestCastBF16ToFP32MKLDNNOp):
+    def init_shape(self):
+        self.shape = []
+
+
 if __name__ == '__main__':
     paddle.enable_static()
     unittest.main()
@@ -25,10 +25,10 @@ from paddle.fluid.tests.unittests.op_test import (
 )
 
 
-@OpTestTool.skip_if_not_cpu_bf16()
 class TestClipOneDNNOp(OpTest):
     def setUp(self):
         self.op_type = "clip"
+        self.init_shape()
         self.set_inputs()
         self.set_attrs()
         self.set_additional_inputs()
@@ -47,8 +47,13 @@ class TestClipOneDNNOp(OpTest):
         self.outputs = {'Out': np.clip(self.x_fp32, self.min, self.max)}
 
+    def init_shape(self):
+        self.shape = [10, 10]
+
     def set_inputs(self):
-        self.inputs = {'X': np.random.random((10, 10)).astype(np.float32) * 25}
+        self.inputs = {
+            'X': np.array(np.random.random(self.shape).astype(np.float32) * 25)
+        }
         self.x_fp32 = self.inputs['X']
 
     def set_additional_inputs(self):
@@ -67,6 +72,11 @@ class TestClipOneDNNOp(OpTest):
         self.check_grad(['X'], 'Out')
 
 
+class TestClipOneDNNOp_ZeroDim(TestClipOneDNNOp):
+    def init_shape(self):
+        self.shape = []
+
+
 class TestClipMinAsInputOneDNNOp(TestClipOneDNNOp):
     def set_additional_inputs(self):
         self.inputs['Min'] = np.array([6.8]).astype('float32')
......
@@ -26,7 +26,6 @@ from paddle.fluid.tests.unittests.op_test import (
 from paddle.fluid.tests.unittests.test_log_softmax import ref_log_softmax
 
 
-@OpTestTool.skip_if_not_cpu_bf16()
 class TestLogSoftmaxOneDNNOp(OpTest):
     def setUp(self):
         self.op_type = 'log_softmax'
@@ -35,7 +34,11 @@ class TestLogSoftmaxOneDNNOp(OpTest):
         self.set_axis()
         x = np.random.uniform(0.1, 1.0, self.shape).astype(np.float32)
-        out = np.apply_along_axis(ref_log_softmax, self.axis, x)
+        out = (
+            np.apply_along_axis(ref_log_softmax, self.axis, x)
+            if len(self.shape) > 0
+            else np.array(0.0).astype(self.dtype)
+        )
 
         if self.dtype == np.uint16:
             x = convert_float_to_uint16(x)
@@ -57,6 +60,11 @@ class TestLogSoftmaxOneDNNOp(OpTest):
         self.check_output_with_place(core.CPUPlace())
 
 
+class TestLogSoftmax0DOneDNNOp(TestLogSoftmaxOneDNNOp):
+    def set_shape(self):
+        self.shape = []
+
+
 class TestLogSoftmax1DOneDNNOp(TestLogSoftmaxOneDNNOp):
     def set_shape(self):
         self.shape = [100]
@@ -78,11 +86,13 @@ class TestLogSoftmaxPositiveAxisOneDNNOp(TestLogSoftmaxOneDNNOp):
 # BF16 TESTS
+@OpTestTool.skip_if_not_cpu_bf16()
 class TestLogSoftmax1DBF16OneDNNOp(TestLogSoftmax1DOneDNNOp):
     def set_dtype(self):
         self.dtype = np.uint16
 
 
+@OpTestTool.skip_if_not_cpu_bf16()
 class TestLogSoftmaxPositiveAxisBF16OneDNNOp(
     TestLogSoftmaxPositiveAxisOneDNNOp
 ):
@@ -90,9 +100,10 @@ class TestLogSoftmaxPositiveAxisBF16OneDNNOp(
         self.dtype = np.uint16
 
 
+@OpTestTool.skip_if_not_cpu_bf16()
 class TestLogSoftmax5DBF16OneDNNOp(TestLogSoftmax5DOneDNNOp):
-    def set_shape(self):
-        self.shape = [2, 3, 4, 5, 6]
+    def set_dtype(self):
+        self.dtype = np.uint16
 
 
 if __name__ == "__main__":
......
@@ -22,14 +22,18 @@ from paddle.fluid.tests.unittests.op_test import OpTest
 class TestScaleOp(OpTest):
     def setUp(self):
+        self.init_shape()
         self.op_type = "scale"
-        self.inputs = {'X': np.random.random((10, 10)).astype(np.float32)}
+        self.inputs = {'X': np.random.random(self.shape).astype(np.float32)}
         self.attrs = {'scale': -2.3, 'use_mkldnn': True, 'bias': 0.2}
         self.use_mkldnn = True
         self.outputs = {
             'Out': (self.inputs['X'] * self.attrs['scale']) + self.attrs['bias']
         }
 
+    def init_shape(self):
+        self.shape = [10, 10]
+
     def test_check_output(self):
         self.check_output(check_dygraph=False)
@@ -37,6 +41,11 @@ class TestScaleOp(OpTest):
         self.check_grad(['X'], 'Out')
 
 
+class TestScaleOp_ZeroDim(TestScaleOp):
+    def init_shape(self):
+        self.shape = []
+
+
 class TestScaleOpBiasNotAfterScale(OpTest):
     def setUp(self):
         self.op_type = "scale"
......
@@ -26,6 +26,7 @@ from paddle.fluid.tests.unittests.test_softmax_op import (
     TestSoftmaxOp4,
     TestSoftmaxOp5,
     TestSoftmaxOp6,
+    TestSoftmaxOp_ZeroDim1,
 )
@@ -95,26 +96,38 @@ class TestSoftmaxMKLDNNOp(TestSoftmaxOp):
 class TestSoftmaxMKLDNNOp2(TestSoftmaxOp2):
     def init_kernel_type(self):
         self.use_mkldnn = True
+        # oneDNN doesn't support float64 dtype
+        self.dtype = np.float32
 
 
 class TestSoftmaxMKLDNNOp3(TestSoftmaxOp3):
     def init_kernel_type(self):
         self.use_mkldnn = True
+        self.dtype = np.float32
 
 
 class TestSoftmaxMKLDNNOp4(TestSoftmaxOp4):
     def init_kernel_type(self):
         self.use_mkldnn = True
+        self.dtype = np.float32
 
 
 class TestSoftmaxMKLDNNOp5(TestSoftmaxOp5):
     def init_kernel_type(self):
         self.use_mkldnn = True
+        self.dtype = np.float32
 
 
 class TestSoftmaxMKLDNNOp6(TestSoftmaxOp6):
     def init_kernel_type(self):
         self.use_mkldnn = True
+        self.dtype = np.float32
 
 
+class TestSoftmaxMKLDNNOp_ZeroDim(TestSoftmaxOp_ZeroDim1):
+    def init_kernel_type(self):
+        self.use_mkldnn = True
+        self.dtype = np.float32
+
+
 # Check if primitives already exist in backward
......
@@ -124,6 +124,7 @@ class TestSoftmaxOp_ZeroDim1(TestSoftmaxOp):
         self.use_mkldnn = False
         # explicilty use float32 for ROCm, as MIOpen does not yet support float64
         self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
+        self.init_kernel_type()
 
         np.random.seed(0)
         x = np.random.uniform(0.1, 1, []).astype(self.dtype)
......