Commit 4e3ba65f authored by yangyaming

Refine doc.

Parent 12596a16
@@ -23,19 +23,15 @@ class SmoothL1LossOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext& ctx) const override {
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
-                            "Input of SmoothL1LossOp must be initialized.");
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"),
-                            "Target of SmoothL1LossOp must be initialized.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "X must be initialized.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Y must be initialized.");
     auto* x = ctx.Input<framework::Tensor>("X");
     auto* y = ctx.Input<framework::Tensor>("Y");
     PADDLE_ENFORCE_EQ(x->dims(), y->dims(),
-                      "Dimensions of SmoothL1LossOp's input and target "
-                      "must be same.");
+                      "The shape of X and Y must be the same.");
     PADDLE_ENFORCE_GE(x->dims().size(), 2,
-                      "Tensor rank of SmoothL1LossOp's input must be "
-                      "at least 2.");
+                      "The tensor rank of X must be at least 2.");
     auto* inside_weight = ctx.Input<framework::Tensor>("InsideWeight");
     if (inside_weight) {
       auto* outside_weight = ctx.Input<framework::Tensor>("OutsideWeight");
@@ -43,10 +39,9 @@ class SmoothL1LossOp : public framework::OperatorWithKernel {
                         "If weights are provided, must specify both "
                         "inside and outside weights.");
       PADDLE_ENFORCE_EQ(inside_weight->dims(), x->dims(),
-                        "Dimensions of inside weight must be same with input.");
-      PADDLE_ENFORCE_EQ(
-          outside_weight->dims(), x->dims(),
-          "Dimensions of outside weight must be same with input.");
+                        "The shape of InsideWeight must be same as X.");
+      PADDLE_ENFORCE_EQ(outside_weight->dims(), x->dims(),
+                        "The shape of OutsideWeight must be same as X.");
     }
     auto* diff = ctx.Output<framework::LoDTensor>("Diff");
@@ -63,21 +58,37 @@ class SmoothL1LossOpMaker : public framework::OpProtoAndCheckerMaker {
   SmoothL1LossOpMaker(framework::OpProto* proto,
                       framework::OpAttrChecker* op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("X", "Input of SmoothL1LossOp.");
-    AddInput("Y", "Target of SmoothL1LossOp.");
-    AddInput("InsideWeight", "Optional input to scale (X-Y).");
-    AddInput("OutsideWeight", "Optinal input to scale smooth l1 loss.");
-    AddOutput("Diff", "Intermediate variable to cache Win*(X-Y).")
+    AddInput("X",
+             "The input tensor of smooth l1 loss op. "
+             "The rank should be greater than or equal to 2 with shape "
+             "[batch_size, value_dim1, value_dim2, ..., value_dimN].");
+    AddInput("Y",
+             "The target tensor of smooth l1 loss op "
+             "with the same shape as X.");
+    AddInput("InsideWeight",
+             "Optional input tensor of smooth l1 loss op with the same shape "
+             "as X. If provided, the result of (X - Y) will be multiplied "
+             "by this tensor element by element.");
+    AddInput("OutsideWeight",
+             "Optional input of smooth l1 loss op with the same shape as X. "
+             "If provided, the output smooth l1 loss will be multiplied by "
+             "this tensor element by element.");
+    AddOutput("Diff", "Intermediate variable to cache InsideWeight*(X-Y).")
         .AsIntermediate();
-    AddOutput("Out", "Final smooth l1 loss of inputs.");
-    AddAttr<AttrType>("sigma", "Hyper parameter, default value is 3.0 .")
+    AddOutput("Out", "Smooth l1 loss.");
+    AddAttr<AttrType>("sigma",
+                      "Hyper parameter of smooth l1 loss op. "
+                      "A float scalar with default value 3.0.")
         .SetDefault(3.0);
     AddComment(R"DOC(
-Compute SmoothL1Loss for input and target.
+Compute smooth l1 loss for input and target. The operator takes the 1st
+dimension of input as batch size. For each instance, it will compute
+smooth l1 loss element by element first and sum all losses to one value.
+So the output shape is [batch_size, 1].
 
 The equation is:
-loss = 0.5 * (sigma * (x - y)) ^ 2    if abs(x - y) < 1 / sigma^2
+loss = 0.5 * (sigma * (x - y))^2      if abs(x - y) < 1 / sigma^2
        abs(x - y) - 0.5 / sigma^2     otherwise
 )DOC");
   }
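The refined DOC string above fully determines the forward computation, so a quick sanity check can be written directly from it. The following is a minimal NumPy sketch of that computation; the function name and signature are illustrative only and are not part of the operator or its Python API.

import numpy as np

def smooth_l1_reference(x, y, sigma=3.0, inside_weight=None, outside_weight=None):
    # x, y: arrays of shape [batch_size, d1, ..., dN]; the optional weights,
    # if given, share that shape (mirroring InsideWeight / OutsideWeight).
    sigma2 = sigma * sigma
    diff = x - y
    if inside_weight is not None:
        diff = diff * inside_weight  # "Diff" caches InsideWeight * (X - Y)
    abs_diff = np.abs(diff)
    # Element-wise smooth l1: quadratic near zero, linear in the tails.
    loss = np.where(abs_diff < 1.0 / sigma2,
                    0.5 * (sigma * diff) ** 2,
                    abs_diff - 0.5 / sigma2)
    if outside_weight is not None:
        loss = loss * outside_weight  # OutsideWeight scales the per-element loss
    # Sum each instance's element-wise losses, giving shape [batch_size, 1].
    return loss.reshape(loss.shape[0], -1).sum(axis=1, keepdims=True)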
@@ -98,12 +109,12 @@ class SmoothL1LossGradOp : public framework::OperatorWithKernel {
         ctx.Output<framework::LoDTensor>(framework::GradVarName("Y"));
     PADDLE_ENFORCE_GE(out_dims.size(), 2,
-                      "Tensor rank of output gradient should be 2.");
+                      "The tensor rank of Input(Out@Grad) should be 2.");
     PADDLE_ENFORCE_EQ(out_dims[0], in_dims[0],
-                      "First dimension of ouptut gradient must be "
-                      "same with input.");
+                      "The 1st dimension of Input(Out@Grad) must be "
+                      "same as input.");
     PADDLE_ENFORCE_EQ(out_dims[1], 1,
-                      "Second dimension of output gradient must be 1.");
+                      "The 2nd dimension of Input(Out@Grad) must be 1.");
     if (x_grad) x_grad->Resize(in_dims);
     if (y_grad) y_grad->Resize(in_dims);
......
@@ -59,7 +59,7 @@ class SmoothL1LossKernel : public framework::OpKernel {
     out1->mutable_data<T>(context.GetPlace());
     auto place = context.GetEigenDevice<Place>();
-    auto sigma = static_cast<T>(context.op().Attr<AttrType>("sigma"));
+    auto sigma = static_cast<T>(context.Attr<AttrType>("sigma"));
     T sigma2 = sigma * sigma;
     bool has_weight = (in2 != nullptr) && (in3 != nullptr);
@@ -122,7 +122,7 @@ class SmoothL1LossGradKernel : public framework::OpKernel {
     auto* in1 = context.Input<Tensor>("OutsideWeight");
     auto* in2 = context.Input<Tensor>("Diff");
     auto* og = context.Input<Tensor>(framework::GradVarName("Out"));
-    auto sigma = static_cast<T>(context.op().Attr<AttrType>("sigma"));
+    auto sigma = static_cast<T>(context.Attr<AttrType>("sigma"));
     T sigma2 = sigma * sigma;
     bool has_weight = (in0 != nullptr) && (in1 != nullptr);
......
@@ -14,7 +14,7 @@ def smooth_l1_loss_forward(val, sigma2):
 class TestSmoothL1LossOp1(OpTest):
     def setUp(self):
         self.op_type = "smooth_l1_loss"
-        dims = (6, 10)
+        dims = (5, 10)
         self.inputs = {
             'X': np.random.random(dims).astype("float32"),
             'Y': np.random.random(dims).astype("float32")
@@ -35,17 +35,17 @@ class TestSmoothL1LossOp1(OpTest):
     def test_check_grad_ingore_x(self):
         self.check_grad(
-            ['Y'], 'Out', max_relative_error=0.02, no_grad_set=set("X"))
+            ['Y'], 'Out', max_relative_error=0.03, no_grad_set=set("X"))
 
     def test_check_grad_ingore_y(self):
         self.check_grad(
-            ['X'], 'Out', max_relative_error=0.02, no_grad_set=set('Y'))
+            ['X'], 'Out', max_relative_error=0.03, no_grad_set=set('Y'))
 
 
 class TestSmoothL1LossOp2(OpTest):
     def setUp(self):
         self.op_type = "smooth_l1_loss"
-        dims = (6, 10)
+        dims = (5, 10)
         self.inputs = {
             'X': np.random.random(dims).astype("float32"),
             'Y': np.random.random(dims).astype("float32"),
@@ -66,20 +66,20 @@ class TestSmoothL1LossOp2(OpTest):
         self.check_output()
 
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.02)
+        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.03)
 
     def test_check_grad_ingore_x(self):
         self.check_grad(
             ['Y'],
             'Out',
-            max_relative_error=0.02,
+            max_relative_error=0.03,
             no_grad_set=set(['X', 'InsideWeight', 'OutsideWeight']))
 
     def test_check_grad_ingore_y(self):
         self.check_grad(
             ['X'],
             'Out',
-            max_relative_error=0.02,
+            max_relative_error=0.03,
             no_grad_set=set(['Y', 'InsideWeight', 'OutsideWeight']))
......
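The tests above rely on the helper smooth_l1_loss_forward(val, sigma2) named in the hunk header; its body is defined earlier in the test file and is not part of this diff. A plausible element-wise version, written here only as a sketch consistent with the equation in the DOC string, would be:

def smooth_l1_loss_forward(val, sigma2):
    # Element-wise smooth l1 for a single difference value, given sigma^2.
    abs_val = abs(val)
    if abs_val < 1.0 / sigma2:
        return 0.5 * val * val * sigma2  # equals 0.5 * (sigma * val)^2
    else:
        return abs_val - 0.5 / sigma2

The expected 'Out' for a test case could then be assembled by vectorizing this helper over X - Y (multiplied by InsideWeight where provided), summing per instance, and scaling by OutsideWeight, e.g. np.vectorize(smooth_l1_loss_forward)(diff, sigma ** 2).reshape(dims[0], -1).sum(axis=1); this usage is illustrative, not quoted from the test file.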