Unverified commit 1d8a042e · authored by chenhaoze · committed by GitHub

OP clip, merge_lod_tensor, convert/elementwise error message enhancement (#23742) (#23944)

* OP clip, merge_lod_tensor, convert/elementwise error message enhancement. test=develop
Parent commit: 8bb19960
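At a glance, the patch does two things: the C++ operators and TensorRT converters now throw `platform::errors::InvalidArgument` with the offending shapes and types embedded in the message, and the Python `clip` / `merge_lod_tensor` layers gain early `check_variable_and_dtype` / `check_type` guards. A minimal sketch of the user-visible effect (assuming a fluid-era PaddlePaddle build with this patch applied):

```python
import numpy as np
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard

with program_guard(Program(), Program()):
    # Passing a raw numpy array where a Variable is expected now fails fast
    # with a descriptive TypeError rather than a less direct error later on.
    try:
        fluid.layers.clip(x=np.random.random((2, 4)).astype("float32"),
                          min=-1.0, max=1.0)
    except TypeError as e:
        print("clip rejected a non-Variable input:", e)
```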
@@ -99,8 +99,9 @@ class ElementwiseWeightOpConverter : public OpConverter {
       regist_eltwise_weight(scale_mode);
     } else {
       PADDLE_THROW(platform::errors::InvalidArgument(
-          "TensorRT Dynamic shape unsupported weight shape for Elementwise "
-          "op!"));
+          "The size of input bias's dims is %d, but TensorRT dynamic shape "
+          "only support size = 1 for Elementwise op!",
+          Y_t->dims().size()));
     }
     return;
   }
@@ -132,12 +133,24 @@ class ElementwiseWeightOpConverter : public OpConverter {
       if (scale_mode == nvinfer1::ScaleMode::kCHANNEL) {
         for (size_t i = 1; i < no_batch_dims.size(); i++) {
           if (dims_y[i] != 1)
-            PADDLE_THROW(
-                "TensorRT unsupported weight shape for Elementwise op!");
+            PADDLE_THROW(platform::errors::InvalidArgument(
+                "The bias's %d dim is %d, but TensorRT dynamic shape only "
+                "support it equals to 1 for Elementwise op!",
+                i, dims_y[i]));
         }
       }
     } else {
-      PADDLE_THROW("TensorRT unsupported weight Shape for Elementwise op!");
+      if (dims_y.size() >= 1) {
+        PADDLE_THROW(platform::errors::InvalidArgument(
+            "The size of bias's dims is %d and bias's size is %d. TensorRT "
+            "doesn't support this shape for Elementwise op!",
+            dims_y.size(), dims_y[0]));
+      } else {
+        PADDLE_THROW(platform::errors::InvalidArgument(
+            "The size of bias's dims is %d. TensorRT doesn't support "
+            "this shape for Elementwise op!",
+            dims_y.size()));
+      }
     }
     regist_eltwise_weight(scale_mode);
   }
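The kCHANNEL branch above only accepts a bias whose non-channel dimensions are all 1; anything else now reports the exact offending dimension. A rough illustration of that shape rule in plain numpy (not Paddle/TensorRT API — just the predicate the converter's loop implements):

```python
import numpy as np

def channel_mode_supported(bias_shape):
    # Mirrors the converter's loop: every dim after the channel dim must be 1.
    return all(d == 1 for d in bias_shape[1:])

ok_bias = np.zeros((16, 1, 1), dtype=np.float32)    # per-channel bias: accepted
bad_bias = np.zeros((16, 32, 1), dtype=np.float32)  # dim 1 is 32: now rejected with
                                                    # "The bias's 1 dim is 32, ..."

print(channel_mode_supported(ok_bias.shape))   # True
print(channel_mode_supported(bad_bias.shape))  # False -> converter throws InvalidArgument
```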
@@ -152,7 +165,11 @@ class ElementwiseTensorOpConverter : public OpConverter {
   void operator()(const framework::proto::OpDesc& op,
                   const framework::Scope& scope, bool test_mode) override {
     auto op_pair = ops.find(op_type_);
-    PADDLE_ENFORCE(op_pair != ops.end(), "Wrong elementwise op type!");
+    PADDLE_ENFORCE_NE(op_pair, ops.end(),
+                      platform::errors::InvalidArgument(
+                          "Elementwise op's type(%s) is not supported. Please "
+                          "check if the op_type is correct.",
+                          op_type_));
     // Here the two nullptr looks strange, that's because the
     // framework::OpDesc's constructor is strange.
@@ -23,14 +23,8 @@ class ClipOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
-                      platform::errors::InvalidArgument(
-                          "Input(X) of ClipOp should not be null. Please check "
-                          "if it is created correctly."));
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
-                      platform::errors::InvalidArgument(
-                          "Output(Out) of ClipOp should not be null. Please "
-                          "check if it is created correctly."));
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "clip");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "clip");
     auto x_dims = ctx->GetInputDim("X");
     auto max = ctx->Attrs().Get<float>("max");
     auto min = ctx->Attrs().Get<float>("min");
@@ -75,14 +69,9 @@ class ClipOpGrad : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
   void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE_EQ(
-        ctx->HasInput("X"), true,
-        platform::errors::InvalidArgument("Input(X) should not be null. Please "
-                                          "check if it is created correctly."));
-    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
-                      platform::errors::InvalidArgument(
-                          "Input(Out@GRAD) should not be null. Please check if "
-                          "it is created correctly."));
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "clip_grad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   "Out@GRAD", "clip_grad");
     auto x_dims = ctx->GetInputDim("X");
     if (ctx->HasOutput(framework::GradVarName("X"))) {
       ctx->SetOutputDim(framework::GradVarName("X"), x_dims);
@@ -178,17 +178,15 @@ class MergeLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker {
 class MergeLoDTensorInferShape : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext *context) const override {
-    PADDLE_ENFORCE(context->HasInput("X"),
-                   "MergeLoDTensorOp must have input X.");
-    PADDLE_ENFORCE(context->HasInput("Mask"),
-                   "MergeLoDTensorOp must have input Mask.");
-    PADDLE_ENFORCE(context->HasInput("InTrue"),
-                   "MergeLoDTensorOp must have input InTrue.");
-    PADDLE_ENFORCE(context->HasInput("InFalse"),
-                   "MergeLoDTensorOp must have input InFalse.");
-    PADDLE_ENFORCE(context->HasOutput("Out"),
-                   "MergeLoDTensorOp must have output Out");
+    OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", "merge_lod_tensor");
+    OP_INOUT_CHECK(context->HasInput("Mask"), "Input", "Mask",
+                   "merge_lod_tensor");
+    OP_INOUT_CHECK(context->HasInput("InTrue"), "Input", "InTrue",
+                   "merge_lod_tensor");
+    OP_INOUT_CHECK(context->HasInput("InFalse"), "Input", "InFalse",
+                   "merge_lod_tensor");
+    OP_INOUT_CHECK(context->HasOutput("Out"), "Output", "Out",
+                   "merge_lod_tensor");
     auto mask_dim = context->GetInputDim("Mask");
     PADDLE_ENFORCE_EQ(mask_dim.size(), 2,
                       "If you are using IfElse OP:"
@@ -165,11 +165,11 @@ def merge_lod_tensor(in_true, in_false, x, mask, level=0):
     to merge the output if True block and False Block.
     Args:
-        in_true(tuple|list|None): The True branch to be merged.
-        in_false(tuple|list|None): The False branch to be merged.
-        x(tuple|list|None): The input tensor that contains complete
+        in_true(Variable|tuple|list|None): The True branch to be merged.
+        in_false(Variable|tuple|list|None): The False branch to be merged.
+        x(Variable|tuple|list|None): The input tensor that contains complete
             lod information needed to construct the output.
-        mask(list): A bool column vector which masks the input.
+        mask(Variable|list): A bool column vector which masks the input.
         level(int): The specific lod level to merge.
     Returns:
@@ -192,6 +192,13 @@ def merge_lod_tensor(in_true, in_false, x, mask, level=0):
             in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
     """
     helper = LayerHelper('merge_lod_tensor', **locals())
+    check_type(x, 'x', (Variable, list, tuple, type(None)),
+               'fluid.layers.merge_lod_tensor')
+    check_type(mask, 'mask', (Variable, list), 'fluid.layers.merge_lod_tensor')
+    check_type(in_true, 'in_true', (Variable, list, tuple, type(None)),
+               'fluid.layers.merge_lod_tensor')
+    check_type(in_false, 'in_false', (Variable, list, tuple, type(None)),
+               'fluid.layers.merge_lod_tensor')
     out = helper.create_variable_for_type_inference(dtype=in_true.dtype)
     helper.append_op(
         type='merge_lod_tensor',
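For reference, this is the call pattern the new `check_type` guards protect, mirroring the (elided) docstring example — a sketch assuming the fluid 1.x layers API:

```python
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[1], dtype='float32', stop_gradient=False)
y = fluid.layers.data(name='y', shape=[1], dtype='bool', stop_gradient=False)
level = 0

# Split by the boolean mask, then merge the two branches back together.
out_true, out_false = fluid.layers.split_lod_tensor(input=x, mask=y, level=level)
out = fluid.layers.merge_lod_tensor(
    in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
```

With the added checks, passing e.g. a plain Python `set()` for any of `x`, `mask`, `in_true`, or `in_false` now raises `TypeError` immediately.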
@@ -11604,6 +11604,7 @@ def clip(x, min, max, name=None):
     """
     helper = LayerHelper("clip", **locals())
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'clip')
     if name is None:
         name = unique_name.generate_with_ignorable_key(".".join(
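A small sketch of what this one-line guard changes (again assuming the fluid-era API): `clip` now validates the input's dtype at graph-construction time, matching the test added below:

```python
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard

with program_guard(Program(), Program()):
    x_ok = fluid.layers.data(name='x_ok', shape=[1], dtype='float32')
    out = fluid.layers.clip(x=x_ok, min=-1.0, max=1.0)  # float32 is accepted

    x_bad = fluid.layers.data(name='x_bad', shape=[1], dtype='int32')
    try:
        fluid.layers.clip(x=x_bad, min=-1.0, max=1.0)   # int32 is not in the allowed list
    except TypeError as e:
        print("clip rejected int32 input:", e)
```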
@@ -16,6 +16,8 @@ from __future__ import print_function
 import unittest
 import numpy as np
+import paddle.fluid as fluid
+from paddle.fluid import Program, program_guard
 from op_test import OpTest

@@ -69,5 +71,22 @@ class TestCase3(TestClipOp):
         self.min = 0.2

+class TestClipOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            input_data = np.random.random((2, 4)).astype("float32")
+
+            def test_Variable():
+                fluid.layers.clip(x=input_data, min=-1.0, max=1.0)
+
+            self.assertRaises(TypeError, test_Variable)
+
+            def test_dtype():
+                x2 = fluid.layers.data(name='x2', shape=[1], dtype='int32')
+                fluid.layers.clip(x=x2, min=-1.0, max=1.0)
+
+            self.assertRaises(TypeError, test_dtype)
+
 if __name__ == '__main__':
     unittest.main()
@@ -15,6 +15,7 @@
 from __future__ import print_function

 import unittest
+from paddle.fluid import Program, program_guard
 import paddle.fluid.core as core
 import numpy as np
 import paddle.fluid.layers as layers
@@ -221,6 +222,60 @@ class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase):
         self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)

+class TestMergeLodTensorOpError(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            input_data = layers.data(
+                name='x', shape=[1], dtype='float32', stop_gradient=False)
+            y = layers.data(
+                name='y', shape=[1], dtype='bool', stop_gradient=False)
+            x_true = layers.data(
+                name='x_true', shape=[1], dtype='float32', stop_gradient=False)
+            x_false = layers.data(
+                name='x_false', shape=[1], dtype='float32', stop_gradient=False)
+            level = 0
+
+            def test_x():
+                # x must be a Variable/list/tuple/None; a set triggers check_type
+                out = merge_lod_tensor(
+                    in_true=x_true,
+                    in_false=x_false,
+                    x=set(),
+                    mask=y,
+                    level=level)
+
+            self.assertRaises(TypeError, test_x)
+
+            def test_mask():
+                out = merge_lod_tensor(
+                    in_true=x_true,
+                    in_false=x_false,
+                    x=input_data,
+                    mask=set(),
+                    level=level)
+
+            self.assertRaises(TypeError, test_mask)
+
+            def test_xtrue():
+                out = merge_lod_tensor(
+                    in_true=set(),
+                    in_false=x_false,
+                    x=input_data,
+                    mask=y,
+                    level=level)
+
+            self.assertRaises(TypeError, test_xtrue)
+
+            def test_xfalse():
+                out = merge_lod_tensor(
+                    in_true=x_true,
+                    in_false=set(),
+                    x=input_data,
+                    mask=y,
+                    level=level)
+
+            self.assertRaises(TypeError, test_xfalse)
+
 class TestSplitLodTensorWithError(unittest.TestCase):
     def test_error(self):
         main_program = Program()