Unverified commit 1d8a042e, authored by chenhaoze, committed by GitHub

OP clip, merge_lod_tensor, convert/elementwise error message enhancement (#23742) (#23944)

* OP clip, merge_lod_tensor, convert/elementwise error message enhancement. test=develop
Parent 8bb19960
@@ -99,8 +99,9 @@ class ElementwiseWeightOpConverter : public OpConverter {
       regist_eltwise_weight(scale_mode);
     } else {
       PADDLE_THROW(platform::errors::InvalidArgument(
-          "TensorRT Dynamic shape unsupported weight shape for Elementwise "
-          "op!"));
+          "The size of input bias's dims is %d, but TensorRT dynamic shape "
+          "only support size = 1 for Elementwise op!",
+          Y_t->dims().size()));
     }
     return;
   }
@@ -132,12 +133,24 @@ class ElementwiseWeightOpConverter : public OpConverter {
     if (scale_mode == nvinfer1::ScaleMode::kCHANNEL) {
       for (size_t i = 1; i < no_batch_dims.size(); i++) {
         if (dims_y[i] != 1)
-          PADDLE_THROW(
-              "TensorRT unsupported weight shape for Elementwise op!");
+          PADDLE_THROW(platform::errors::InvalidArgument(
+              "The bias's %d dim is %d, but TensorRT dynamic shape only "
+              "support it equals to 1 for Elementwise op!",
+              i, dims_y[i]));
       }
     }
   } else {
-    PADDLE_THROW("TensorRT unsupported weight Shape for Elementwise op!");
+    if (dims_y.size() >= 1) {
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "The size of bias's dims is %d and bias's size is %d. TensorRT "
+          "doesn't support this shape for Elementwise op!",
+          dims_y.size(), dims_y[0]));
+    } else {
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "The size of bias's dims is %d. TensorRT doesn't support "
+          "this shape for Elementwise op!",
+          dims_y.size()));
+    }
   }
   regist_eltwise_weight(scale_mode);
 }
@@ -152,7 +165,11 @@ class ElementwiseTensorOpConverter : public OpConverter {
   void operator()(const framework::proto::OpDesc& op,
                   const framework::Scope& scope, bool test_mode) override {
     auto op_pair = ops.find(op_type_);
-    PADDLE_ENFORCE(op_pair != ops.end(), "Wrong elementwise op type!");
+    PADDLE_ENFORCE_NE(op_pair, ops.end(),
+                      platform::errors::InvalidArgument(
+                          "Elementwise op's type(%s) is not supported. Please "
+                          "check if the op_type is correct.",
+                          op_type_));
     // Here the two nullptr looks strange, that's because the
     // framework::OpDesc's constructor is strange.
......
@@ -23,14 +23,8 @@ class ClipOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      platform::errors::InvalidArgument(
-                          "Input(X) of ClipOp should not be null. Please check "
-                          "if it is created correctly."));
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      platform::errors::InvalidArgument(
-                          "Output(Out) of ClipOp should not be null. Please "
-                          "check if it is created correctly."));
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "clip");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "clip");
     auto x_dims = ctx->GetInputDim("X");
     auto max = ctx->Attrs().Get<float>("max");
     auto min = ctx->Attrs().Get<float>("min");
@@ -75,14 +69,9 @@ class ClipOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(
-        ctx->HasInput("X"), true,
-        platform::errors::InvalidArgument("Input(X) should not be null. Please "
-                                          "check if it is created correctly."));
-    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
-                      platform::errors::InvalidArgument(
-                          "Input(Out@GRAD) should not be null. Please check if "
-                          "it is created correctly."));
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "clip_grad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   "Out@GRAD", "clip_grad");
     auto x_dims = ctx->GetInputDim("X");
     if (ctx->HasOutput(framework::GradVarName("X"))) {
       ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
......
@@ -178,17 +178,15 @@ class MergeLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker {
 class MergeLoDTensorInferShape : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE(context->HasInput("X"),
-                   "MergeLoDTensorOp must have input X.");
-    PADDLE_ENFORCE(context->HasInput("Mask"),
-                   "MergeLoDTensorOp must have input Mask.");
-    PADDLE_ENFORCE(context->HasInput("InTrue"),
-                   "MergeLoDTensorOp must have input InTrue.");
-    PADDLE_ENFORCE(context->HasInput("InFalse"),
-                   "MergeLoDTensorOp must have input InFalse.");
-    PADDLE_ENFORCE(context->HasOutput("Out"),
-                   "MergeLoDTensorOp must have output Out");
+    OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", "merge_lod_tensor");
+    OP_INOUT_CHECK(context->HasInput("Mask"), "Input", "Mask",
+                   "merge_lod_tensor");
+    OP_INOUT_CHECK(context->HasInput("InTrue"), "Input", "InTrue",
+                   "merge_lod_tensor");
+    OP_INOUT_CHECK(context->HasInput("InFalse"), "Input", "InFalse",
+                   "merge_lod_tensor");
+    OP_INOUT_CHECK(context->HasOutput("Out"), "Output", "Out",
+                   "merge_lod_tensor");
     auto mask_dim = context->GetInputDim("Mask");
     PADDLE_ENFORCE_EQ(mask_dim.size(), 2,
                       "If you are using IfElse OP:"
......
@@ -165,11 +165,11 @@ def merge_lod_tensor(in_true, in_false, x, mask, level=0):
         to merge the output if True block and False Block.
     Args:
-        in_true(tuple|list|None): The True branch to be merged.
-        in_false(tuple|list|None): The False branch to be merged.
-        x(tuple|list|None): The input tensor that contains complete
+        in_true(Variable|tuple|list|None): The True branch to be merged.
+        in_false(Variable|tuple|list|None): The False branch to be merged.
+        x(Variable|tuple|list|None): The input tensor that contains complete
             lod information needed to construct the output.
-        mask(list): A bool column vector which masks the input.
+        mask(Variable|list): A bool column vector which masks the input.
         level(int): The specific lod level to merge.
     Returns:
@@ -192,6 +192,13 @@ def merge_lod_tensor(in_true, in_false, x, mask, level=0):
             in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
     """
     helper = LayerHelper('merge_lod_tensor', **locals())
+    check_type(x, 'x', (Variable, list, tuple, type(None)),
+               'fluid.layers.merge_lod_tensor')
+    check_type(mask, 'mask', (Variable, list), 'fluid.layers.merge_lod_tensor')
+    check_type(in_true, 'in_true', (Variable, list, tuple, type(None)),
+               'fluid.layers.merge_lod_tensor')
+    check_type(in_false, 'in_false', (Variable, list, tuple, type(None)),
+               'fluid.layers.merge_lod_tensor')
     out = helper.create_variable_for_type_inference(dtype=in_true.dtype)
     helper.append_op(
         type='merge_lod_tensor',
......
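
For context, the `check_type` guards added above make invalid Python-side arguments fail fast during graph construction. The following is a minimal usage sketch, not part of this commit, mirroring the unit test added further below: passing a plain `set()` as `mask` now raises a `TypeError` from `check_type` instead of an opaque failure later on.

```python
import paddle.fluid as fluid
import paddle.fluid.layers as layers

# Build throwaway programs so the example does not touch the default program.
with fluid.program_guard(fluid.Program(), fluid.Program()):
    x = layers.data(name='x', shape=[1], dtype='float32')
    x_true = layers.data(name='x_true', shape=[1], dtype='float32')
    x_false = layers.data(name='x_false', shape=[1], dtype='float32')
    try:
        # mask must be a Variable or list; a set() trips the new check_type call.
        layers.merge_lod_tensor(
            in_true=x_true, in_false=x_false, x=x, mask=set(), level=0)
    except TypeError as e:
        print("merge_lod_tensor rejected the bad mask:", e)
```
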
@@ -11604,6 +11604,7 @@ def clip(x, min, max, name=None):
     """
     helper = LayerHelper("clip", **locals())
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'clip')
     if name is None:
         name = unique_name.generate_with_ignorable_key(".".join(
......
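
Similarly, the `check_variable_and_dtype` guard added to `clip` rejects unsupported inputs up front. A minimal sketch of the resulting behavior, not part of the diff and patterned on the `test_dtype` case in the new unit test below: an `int32` Variable now raises a `TypeError` at layer-construction time instead of failing later in the C++ kernel.

```python
import paddle.fluid as fluid

with fluid.program_guard(fluid.Program(), fluid.Program()):
    # clip only accepts float16/float32/float64 inputs after this change.
    x2 = fluid.layers.data(name='x2', shape=[1], dtype='int32')
    try:
        fluid.layers.clip(x=x2, min=-1.0, max=1.0)
    except TypeError as e:
        print("clip rejected the int32 input:", e)
```
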
@@ -16,6 +16,8 @@ from __future__ import print_function
 import unittest
 import numpy as np
+import paddle.fluid as fluid
+from paddle.fluid import Program, program_guard
 from op_test import OpTest
@@ -69,5 +71,22 @@ class TestCase3(TestClipOp):
         self.min = 0.2


+class TestClipOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            input_data = np.random.random((2, 4)).astype("float32")
+
+            def test_Variable():
+                fluid.layers.clip(x=input_data, min=-1.0, max=1.0)
+
+            self.assertRaises(TypeError, test_Variable)
+
+            def test_dtype():
+                x2 = fluid.layers.data(name='x2', shape=[1], dtype='int32')
+                fluid.layers.clip(x=x2, min=-1.0, max=1.0)
+
+            self.assertRaises(TypeError, test_dtype)
+
+
 if __name__ == '__main__':
     unittest.main()
@@ -15,6 +15,7 @@
 from __future__ import print_function
 import unittest
+from paddle.fluid import Program, program_guard
 import paddle.fluid.core as core
 import numpy as np
 import paddle.fluid.layers as layers
@@ -221,6 +222,60 @@ class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase):
         self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)


+class TestMergeLodTensorOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            input_data = layers.data(
+                name='x', shape=[1], dtype='float32', stop_gradient=False)
+            y = layers.data(
+                name='y', shape=[1], dtype='bool', stop_gradient=False)
+            x_true = layers.data(
+                name='x_true', shape=[1], dtype='float32', stop_gradient=False)
+            x_false = layers.data(
+                name='x_false', shape=[1], dtype='float32', stop_gradient=False)
+            level = 0
+
+            def test_x():
+                out = merge_lod_tensor(
+                    in_true=x_true,
+                    in_false=x_false,
+                    x=set(),
+                    mask=y,
+                    level=level)
+
+            self.assertRaises(TypeError, test_x)
+
+            def test_mask():
+                out = merge_lod_tensor(
+                    in_true=x_true,
+                    in_false=x_false,
+                    x=input_data,
+                    mask=set(),
+                    level=level)
+
+            self.assertRaises(TypeError, test_mask)
+
+            def test_xtrue():
+                out = merge_lod_tensor(
+                    in_true=set(),
+                    in_false=x_false,
+                    x=input_data,
+                    mask=y,
+                    level=level)
+
+            self.assertRaises(TypeError, test_xtrue)
+
+            def test_xfalse():
+                out = merge_lod_tensor(
+                    in_true=x_true,
+                    in_false=set(),
+                    x=input_data,
+                    mask=y,
+                    level=level)
+
+            self.assertRaises(TypeError, test_xfalse)
+
+
 class TestSplitLodTensorWithError(unittest.TestCase):
     def test_error(self):
         main_program = Program()
......