Unverified commit 526a2117, authored by F FDInSky, committed via GitHub

update conv error info (#24430)

* test=develop update conv error info

* test=develop update iou_similarity error info

* test=develop update some error info based on review
Parent c2103c48
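Every hunk in this commit applies the same pattern: a PADDLE_ENFORCE* call that used to carry only a bare message string now receives a platform::errors::InvalidArgument object whose message also reports the value actually seen. A minimal before/after sketch of that pattern, using the x_dims rank check from the iou_similarity hunk below (x_dims is assumed to be the DDim already read from the InferShape context):

// Old style: condition plus a bare string; the error reports no actual value.
PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, "The rank of Input(X) must be 2.");

// New style used throughout this commit: the same condition, but the message
// is a structured InvalidArgument error that also reports the rank found.
PADDLE_ENFORCE_EQ(
    x_dims.size(), 2UL,
    platform::errors::InvalidArgument(
        "The rank of Input(X) must be 2, but got dimension = %d.",
        x_dims.size()));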
@@ -23,17 +23,29 @@ class IOUSimilarityOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of IOUSimilarityOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Y"),
-                   "Input(Y) of IOUSimilarityOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "iou_similarity");
+    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "iou_similarity");
     auto x_dims = ctx->GetInputDim("X");
     auto y_dims = ctx->GetInputDim("Y");
 
-    PADDLE_ENFORCE_EQ(x_dims.size(), 2UL, "The rank of Input(X) must be 2.");
-    PADDLE_ENFORCE_EQ(x_dims[1], 4UL, "The shape of X is [N, 4]");
-    PADDLE_ENFORCE_EQ(y_dims.size(), 2UL, "The rank of Input(Y) must be 2.");
-    PADDLE_ENFORCE_EQ(y_dims[1], 4UL, "The shape of Y is [M, 4]");
+    PADDLE_ENFORCE_EQ(
+        x_dims.size(), 2UL,
+        platform::errors::InvalidArgument(
+            "The rank of Input(X) must be 2, but got dimension = %d.",
+            x_dims.size()));
+    PADDLE_ENFORCE_EQ(
+        x_dims[1], 4UL,
+        platform::errors::InvalidArgument(
+            "The shape of X is [N, 4], but got dimension = %d.", x_dims[1]));
+    PADDLE_ENFORCE_EQ(
+        y_dims.size(), 2UL,
+        platform::errors::InvalidArgument(
+            "The rank of Input(Y) must be 2, but got dimension = %d.",
+            y_dims.size()));
+    PADDLE_ENFORCE_EQ(
+        y_dims[1], 4UL,
+        platform::errors::InvalidArgument(
+            "The shape of Y is [M, 4], but got dimension = %d.", y_dims[1]));
 
     ctx->ShareLoD("X", /*->*/ "Out");
     ctx->SetOutputDim("Out", framework::make_ddim({x_dims[0], y_dims[0]}));
......
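Besides the InvalidArgument rewrite, the hunk above replaces the hand-written "should not be null" checks with the OP_INOUT_CHECK macro. A short sketch of how that call reads (the gloss of the arguments is ours, not taken from this diff: a presence check, the slot kind, the slot name, and the operator name, from which the macro builds a structured error when the check fails):

// Presence checks for the X and Y inputs of the iou_similarity operator; on
// failure, the raised error names both the missing slot and the operator.
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "iou_similarity");
OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "iou_similarity");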
@@ -95,7 +95,7 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
     PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   "It must use CPUPlace.");
+                   platform::errors::InvalidArgument("It must use CPUPlace."));
     bool is_INT8 =
         std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value;
     if (!is_INT8) {
@@ -130,37 +130,59 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     auto* output = ctx.Output<Tensor>("Output");
 
     PADDLE_ENFORCE_EQ(input->layout(), DataLayout::kMKLDNN,
-                      "Wrong layout set for Input tensor");
-    PADDLE_ENFORCE_NE(input->format(), MKLDNNMemoryFormat::undef,
-                      "Wrong format set for Input tensor");
+                      platform::errors::InvalidArgument(
+                          "The input tensor's layout should be %d, but got %d.",
+                          DataLayout::kMKLDNN, input->layout()));
+    PADDLE_ENFORCE_NE(
+        input->format(), MKLDNNMemoryFormat::undef,
+        platform::errors::InvalidArgument("Wrong format set for Input tensor"));
 
-    PADDLE_ENFORCE_EQ(filter->layout(), DataLayout::kMKLDNN,
-                      "Wrong layout set for Filter tensor");
+    PADDLE_ENFORCE_EQ(
+        filter->layout(), DataLayout::kMKLDNN,
+        platform::errors::InvalidArgument(
+            "The Filter tensor's layout should be %d, but got %d.",
+            DataLayout::kMKLDNN, filter->layout()));
     PADDLE_ENFORCE_NE(filter->format(), MKLDNNMemoryFormat::undef,
-                      "Wrong format set for Filter tensor");
+                      platform::errors::InvalidArgument(
+                          "Wrong format set for Filter tensor"));
 
-    PADDLE_ENFORCE_GE(
-        input->dims().size(), 4,
-        "Input must be with 4 or 5 dimensions, i.e. NCHW or NCDHW");
-    PADDLE_ENFORCE_LE(
-        input->dims().size(), 5,
-        "Input must be with 4 or 5 dimensions, i.e. NCHW or NCDHW");
+    PADDLE_ENFORCE_GE(input->dims().size(), 4,
+                      platform::errors::InvalidArgument(
+                          "Input must be with 4 or 5 dimensions, i.e. NCHW or "
+                          "NCDHW, but got dimension = %d.",
+                          input->dims().size()));
+    PADDLE_ENFORCE_LE(input->dims().size(), 5,
+                      platform::errors::InvalidArgument(
+                          "Input must be with 4 or 5 dimensions, i.e. NCHW or "
+                          "NCDHW, but got dimension = %d.",
+                          input->dims().size()));
 
-    PADDLE_ENFORCE_GE(
-        filter->dims().size(), 4,
-        "Filter must be with 4 or 5 dimensions, i.e. OIHW or OIDHW");
-    PADDLE_ENFORCE_LE(
-        filter->dims().size(), 5,
-        "Filter must be with 4 or 5 dimensions, i.e. OIHW or OIDHW");
+    PADDLE_ENFORCE_GE(filter->dims().size(), 4,
+                      platform::errors::InvalidArgument(
+                          "Filter must be with 4 or 5 dimensions, i.e. OIHW or "
+                          "OIDHW, but got dimension = %d.",
+                          filter->dims().size()));
+    PADDLE_ENFORCE_LE(filter->dims().size(), 5,
+                      platform::errors::InvalidArgument(
+                          "Filter must be with 4 or 5 dimensions, i.e. OIHW or "
+                          "OIDHW, but got dimension = %d.",
+                          filter->dims().size()));
 
     if (bias) {
-      PADDLE_ENFORCE_EQ(bias->layout(), DataLayout::kMKLDNN,
-                        "Wrong layout set for Bias tensor");
+      PADDLE_ENFORCE_EQ(
+          bias->layout(), DataLayout::kMKLDNN,
+          platform::errors::InvalidArgument(
+              "The Bias tensor's layout should be %d, but got %d.",
+              DataLayout::kMKLDNN, bias->layout()));
       PADDLE_ENFORCE_NE(bias->format(), MKLDNNMemoryFormat::undef,
-                        "Wrong format set for Bias tensor");
+                        platform::errors::InvalidArgument(
+                            "Got wrong format for Bias tensor."));
 
-      PADDLE_ENFORCE_EQ(bias->dims().size(), 1,
-                        "Bias must only have 1 dimension, i.e. X");
+      PADDLE_ENFORCE_EQ(
+          bias->dims().size(), 1,
+          platform::errors::InvalidArgument("Bias must only have 1 dimension, "
+                                            "i.e. X, but got dimension = %d.",
+                                            bias->dims().size()));
     }
 
     std::vector<int> strides_temp = ctx.Attr<std::vector<int>>("strides");
@@ -295,10 +295,16 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
         PADDLE_ENFORCE_NE(
             residual_param_data, nullptr,
-            "Provide data if you want MKLDNN conv+elementwise_add fusion");
-        PADDLE_ENFORCE_EQ(output->dims(), residual_param->dims(),
-                          "Output and elementwise parameter need to have the "
-                          "same dimension sizes");
+            platform::errors::InvalidArgument(
+                "Provide data if you want MKLDNN conv+elementwise_add fusion"));
+        PADDLE_ENFORCE_EQ(
+            output->dims(), residual_param->dims(),
+            platform::errors::InvalidArgument(
+                "Output and elementwise parameter need to have the "
+                "same dimension sizes, "
+                "but got output's dimension = %d and residual param's dimension "
+                "= %d.",
+                output->dims().size(), residual_param->dims().size()));
 
         if (residual_param->format() != handler.GetDstFormat()) {
           auto output_data =
@@ -371,16 +399,23 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     auto* output = ctx.Output<Tensor>("Output");
 
     PADDLE_ENFORCE_EQ(input->layout(), DataLayout::kMKLDNN,
-                      "Wrong layout set for Input tensor");
+                      platform::errors::InvalidArgument(
+                          "The input tensor's layout should be %d, but got %d.",
+                          DataLayout::kMKLDNN, input->layout()));
     PADDLE_ENFORCE_NE(input->format(), MKLDNNMemoryFormat::undef,
-                      "Wrong format set for Input tensor");
+                      platform::errors::InvalidArgument(
+                          "Got wrong format for Input tensor."));
 
-    PADDLE_ENFORCE_GE(
-        input->dims().size(), 4,
-        "Input must be with 4 or 5 dimensions, i.e. NCHW or NCDHW");
-    PADDLE_ENFORCE_LE(
-        input->dims().size(), 5,
-        "Input must be with 4 or 5 dimensions, i.e. NCHW or NCDHW");
+    PADDLE_ENFORCE_GE(input->dims().size(), 4,
+                      platform::errors::InvalidArgument(
+                          "Input must be with 4 or 5 dimensions, i.e. NCHW or "
+                          "NCDHW, but got dimension = %d.",
+                          input->dims().size()));
+    PADDLE_ENFORCE_LE(input->dims().size(), 5,
+                      platform::errors::InvalidArgument(
+                          "Input must be with 4 or 5 dimensions, i.e. NCHW or "
+                          "NCDHW, but got dimension = %d.",
+                          input->dims().size()));
 
     std::string fuse_activation = ctx.Attr<std::string>("fuse_activation");
     bool fuse_residual_conn = ctx.Attr<bool>("fuse_residual_connection");
@@ -438,17 +473,25 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     auto* filter = ctx.Input<Tensor>("Filter");
 
-    PADDLE_ENFORCE_EQ(filter->layout(), DataLayout::kMKLDNN,
-                      "Wrong layout set for Filter tensor");
+    PADDLE_ENFORCE_EQ(
+        filter->layout(), DataLayout::kMKLDNN,
+        platform::errors::InvalidArgument(
+            "The filter tensor's layout should be %d, but got %d.",
+            DataLayout::kMKLDNN, filter->layout()));
     PADDLE_ENFORCE_NE(filter->format(), MKLDNNMemoryFormat::undef,
-                      "Wrong format set for Filter tensor");
+                      platform::errors::InvalidArgument(
+                          "Got wrong format for Filter tensor."));
 
-    PADDLE_ENFORCE_GE(
-        filter->dims().size(), 4,
-        "Filter must be with 4 or 5 dimensions, i.e. OIHW or OIDHW");
-    PADDLE_ENFORCE_LE(
-        filter->dims().size(), 5,
-        "Filter must be with 4 or 5 dimensions, i.e. OIHW or OIDHW");
+    PADDLE_ENFORCE_GE(filter->dims().size(), 4,
+                      platform::errors::InvalidArgument(
+                          "Filter must be with 4 or 5 dimensions, i.e. OIHW "
+                          "or OIDHW, but got dimension = %d.",
+                          filter->dims().size()));
+    PADDLE_ENFORCE_LE(filter->dims().size(), 5,
+                      platform::errors::InvalidArgument(
+                          "Filter must be with 4 or 5 dimensions, i.e. OIHW "
+                          "or OIDHW, but got dimension = %d.",
+                          filter->dims().size()));
 
     PADDLE_ENFORCE_EQ(
         !fuse_residual_conn || !force_fp32_output, true,
@@ -457,13 +500,20 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     auto* bias = ctx.HasInput("Bias") ? ctx.Input<Tensor>("Bias") : nullptr;
     if (bias) {
-      PADDLE_ENFORCE_EQ(bias->layout(), DataLayout::kMKLDNN,
-                        "Wrong layout set for Bias tensor");
+      PADDLE_ENFORCE_EQ(
+          bias->layout(), DataLayout::kMKLDNN,
+          platform::errors::InvalidArgument(
+              "The bias tensor's layout should be %d, but got %d.",
+              DataLayout::kMKLDNN, bias->layout()));
       PADDLE_ENFORCE_NE(bias->format(), MKLDNNMemoryFormat::undef,
-                        "Wrong format set for Bias tensor");
+                        platform::errors::InvalidArgument(
+                            "Got wrong format for Bias tensor."));
 
       PADDLE_ENFORCE_EQ(bias->dims().size(), 1,
-                        "Bias must only have 1 dimension, i.e. X");
+                        platform::errors::InvalidArgument(
+                            "Bias must only have 1 dimension, i.e. X, but "
+                            "got dimension = %d.",
+                            bias->dims().size()));
     }
 
     std::vector<int> strides_temp = ctx.Attr<std::vector<int>>("strides");
@@ -482,7 +532,9 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     bool is_conv3d = strides.size() == 3U;
     PADDLE_ENFORCE_NE(is_conv3d, true,
-                      "int8 does not support conv3d currently");
+                      platform::errors::InvalidArgument(
+                          "int8 does not support conv3d currently, should "
+                          "set param is_conv3d as False"));
 
     auto input_dims = input->dims();
     auto data_dims = framework::slice_ddim(input_dims, 2, input_dims.size());
@@ -599,9 +651,13 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     if (fuse_residual_conn) {
       auto residual_param = ctx.Input<Tensor>("ResidualData");
-      PADDLE_ENFORCE_EQ(output->dims(), residual_param->dims(),
-                        "Output and elementwise parameter need to have the "
-                        "same dimension sizes");
+      PADDLE_ENFORCE_EQ(
+          output->dims(), residual_param->dims(),
+          platform::errors::InvalidArgument(
+              "Output and elementwise parameter need to have the "
+              "same dimension sizes, but got output's dimension = %d"
+              " and residual param's dimension = %d.",
+              output->dims().size(), residual_param->dims().size()));
       auto residual_dt =
           paddle::framework::ToMKLDNNDataType(residual_param->type());
       if (residual_param->format() != handler->GetDstFormat()) {
@@ -729,7 +785,7 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
     PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   "It must use CPUPlace.");
+                   platform::errors::InvalidArgument("It must use CPUPlace."));
     auto& dev_ctx =
         ctx.template device_context<platform::MKLDNNDeviceContext>();
@@ -743,23 +799,34 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
     Tensor* filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter"));
 
     PADDLE_ENFORCE_EQ(input->layout(), DataLayout::kMKLDNN,
-                      "Wrong layout set for Input tensor");
+                      platform::errors::InvalidArgument(
+                          "The input tensor's layout should be %d, but got %d.",
+                          DataLayout::kMKLDNN, input->layout()));
     PADDLE_ENFORCE_NE(input->format(), MKLDNNMemoryFormat::undef,
-                      "Wrong format set for Input tensor");
+                      platform::errors::InvalidArgument(
+                          "Got wrong format for Input tensor."));
 
-    PADDLE_ENFORCE_EQ(filter->layout(), DataLayout::kMKLDNN,
-                      "Wrong layout set for Filter tensor");
+    PADDLE_ENFORCE_EQ(
+        filter->layout(), DataLayout::kMKLDNN,
+        platform::errors::InvalidArgument(
+            "The filter tensor's layout should be %d, but got %d.",
+            DataLayout::kMKLDNN, filter->layout()));
     PADDLE_ENFORCE_NE(filter->format(), MKLDNNMemoryFormat::undef,
-                      "Wrong format set for Filter tensor");
+                      platform::errors::InvalidArgument(
+                          "Got wrong format for Filter tensor."));
 
-    PADDLE_ENFORCE_EQ(output_grad->layout(), DataLayout::kMKLDNN,
-                      "Wrong layout set for output_grad tensor");
+    PADDLE_ENFORCE_EQ(
+        output_grad->layout(), DataLayout::kMKLDNN,
+        platform::errors::InvalidArgument(
+            "The output_grad tensor's layout should be %d, but got %d.",
+            DataLayout::kMKLDNN, output_grad->layout()));
     PADDLE_ENFORCE_NE(output_grad->format(), MKLDNNMemoryFormat::undef,
                       "Wrong format set for output_grad tensor");
 
     PADDLE_ENFORCE_EQ(
         ctx.Attr<bool>("is_test"), false,
-        "is_test attribute should be set to False in training phase.");
+        platform::errors::InvalidArgument(
+            "is_test attribute should be set to False in training phase."));
 
     if (!input_grad && !filter_grad) return;
@@ -859,7 +926,8 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel<T> {
         std::static_pointer_cast<mkldnn::convolution_forward::primitive_desc>(
             dev_ctx.GetBlob(key_conv_pd));
     PADDLE_ENFORCE_NE(conv_pd, nullptr,
-                      "Fail to find conv_pd in device context");
+                      platform::errors::InvalidArgument(
+                          "Fail to find conv_pd in device context"));
 
     auto mkldnn_paddings = platform::ToMkldnnPadding(paddings);
......
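The conv kernels above repeat one pair of assertions (layout must be kMKLDNN, memory format must not be undef) for the Input, Filter, Bias, and output_grad tensors. As a rough sketch of that shared pattern only (the helper below is hypothetical, not part of this commit, and assumes the usual conv_mkldnn_op.cc declarations such as Tensor, DataLayout, MKLDNNMemoryFormat, PADDLE_ENFORCE_* and platform::errors are in scope):

// Hypothetical helper illustrating the check this diff writes out per tensor:
// the layout must be the MKL-DNN layout and the memory format must be defined.
static void CheckMKLDNNTensor(const Tensor* t, const char* name) {
  PADDLE_ENFORCE_EQ(t->layout(), DataLayout::kMKLDNN,
                    platform::errors::InvalidArgument(
                        "The %s tensor's layout should be %d, but got %d.",
                        name, DataLayout::kMKLDNN, t->layout()));
  PADDLE_ENFORCE_NE(t->format(), MKLDNNMemoryFormat::undef,
                    platform::errors::InvalidArgument(
                        "Got wrong format for %s tensor.", name));
}

A call such as CheckMKLDNNTensor(filter, "Filter") would then stand in for the two-statement block repeated in each kernel; the commit itself keeps the checks inline.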
@@ -30,12 +30,13 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
  public:
   void Compute(const paddle::framework::ExecutionContext& ctx) const override {
     PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
-                   "It must use CPUPlace.");
+                   platform::errors::InvalidArgument("It must use CPUPlace."));
     const bool is_test = ctx.Attr<bool>("is_test");
-    PADDLE_ENFORCE(
-        is_test == true,
-        "ConvTransposeMKLDNN works only for inference!. Set is_test = True");
+    PADDLE_ENFORCE_EQ(is_test, true,
+                      platform::errors::InvalidArgument(
+                          "ConvTransposeMKLDNN works only for inference. "
+                          "Set is_test = True, but got is_test = False."));
 
     auto& dev_ctx =
         ctx.template device_context<paddle::platform::MKLDNNDeviceContext>();
@@ -46,29 +47,49 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel<T> {
     auto* bias = ctx.HasInput("Bias") ? ctx.Input<Tensor>("Bias") : nullptr;
     auto* output = ctx.Output<Tensor>("Output");
 
-    PADDLE_ENFORCE_EQ(input->layout(), DataLayout::kMKLDNN,
-                      "Wrong layout set for Input tensor");
+    PADDLE_ENFORCE_EQ(
+        input->layout(), DataLayout::kMKLDNN,
+        platform::errors::InvalidArgument(
+            "Got wrong layout = %d for Input tensor.", input->layout()));
     PADDLE_ENFORCE_NE(input->format(), MKLDNNMemoryFormat::undef,
-                      "Wrong format set for Input tensor");
+                      platform::errors::InvalidArgument(
+                          "Got wrong format for Input tensor."));
 
-    PADDLE_ENFORCE_EQ(filter->layout(), DataLayout::kMKLDNN,
-                      "Wrong layout set for Filter tensor");
+    PADDLE_ENFORCE_EQ(
+        filter->layout(), DataLayout::kMKLDNN,
+        platform::errors::InvalidArgument(
+            "The filter tensor's layout should be %d, but got %d.",
+            DataLayout::kMKLDNN, filter->layout()));
     PADDLE_ENFORCE_NE(filter->format(), MKLDNNMemoryFormat::undef,
-                      "Wrong format set for Filter tensor");
+                      platform::errors::InvalidArgument(
+                          "Got wrong format for Filter tensor."));
 
-    PADDLE_ENFORCE_EQ(input->dims().size(), 4,
-                      "Input must be with 4 dimensions, i.e. NCHW");
-    PADDLE_ENFORCE_EQ(filter->dims().size(), 4,
-                      "Filter must be with 4 dimensions, i.e. OIHW");
+    PADDLE_ENFORCE_EQ(
+        input->dims().size(), 4,
+        platform::errors::InvalidArgument(
+            "Input must be with 4 dimensions, i.e. NCHW, but got dimension = %d.",
+            input->dims().size()));
+    PADDLE_ENFORCE_EQ(
+        filter->dims().size(), 4,
+        platform::errors::InvalidArgument("Filter must be with 4 dimensions, "
+                                          "i.e. OIHW, but got dimension = %d.",
+                                          filter->dims().size()));
 
     if (bias) {
-      PADDLE_ENFORCE_EQ(bias->layout(), DataLayout::kMKLDNN,
-                        "Wrong layout set for Bias tensor");
+      PADDLE_ENFORCE_EQ(
+          bias->layout(), DataLayout::kMKLDNN,
+          platform::errors::InvalidArgument(
+              "The bias tensor's layout should be %d, but got %d.",
+              DataLayout::kMKLDNN, bias->layout()));
       PADDLE_ENFORCE_NE(bias->format(), MKLDNNMemoryFormat::undef,
-                        "Wrong format set for Bias tensor");
+                        platform::errors::InvalidArgument(
+                            "Got wrong format for Bias tensor."));
 
-      PADDLE_ENFORCE_EQ(bias->dims().size(), 1,
-                        "Bias must only have 1 dimension, i.e. X");
+      PADDLE_ENFORCE_EQ(
+          bias->dims().size(), 1,
+          platform::errors::InvalidArgument("Bias must only have 1 dimension, "
+                                            "i.e. X, but got dimension = %d.",
+                                            bias->dims().size()));
     }
 
     std::vector<int> strides_temp = ctx.Attr<std::vector<int>>("strides");
......