Commit d0521e6f authored by Luo Tao

Merge branch 'develop' into use_op

@@ -25,16 +25,30 @@ class CosSimOp : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    // notnull check
    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) must not be null.");
-    PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims(),
-                      ctx.Input<Tensor>("Y")->dims(),
-                      "Dimensions of Input(X) and Input(Y) must be the same.");
-    auto dims = ctx.Input<Tensor>("X")->dims();
-    ctx.Output<Tensor>("Out")->Resize({dims[0], 1});
-    ctx.Output<Tensor>("XNorm")->Resize({dims[0], 1});
-    ctx.Output<Tensor>("YNorm")->Resize({dims[0], 1});
+
+    // shape check
+    auto x_dims = ctx.Input<Tensor>("X")->dims();
+    auto y_dims = ctx.Input<Tensor>("Y")->dims();
+
+    PADDLE_ENFORCE_EQ(x_dims.size(), y_dims.size(),
+                      "Ranks of Input(X) and Input(Y) must be equal.");
+    PADDLE_ENFORCE_GE(x_dims.size(), 2,
+                      "Rank of Input(X) must not be less than 2.");
+    PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 1, x_dims.size()),
+                      framework::slice_ddim(y_dims, 1, y_dims.size()),
+                      "All dimensions except the 1st of Input(X) and Input(Y) "
+                      "must be equal.");
+    PADDLE_ENFORCE(x_dims[0] == y_dims[0] || y_dims[0] == 1,
+                   "The 1st dimension of Input(Y) must be equal to Input(X) or"
+                   " just 1 (which will be broadcasted to match Input(X)).");
+
+    // resize tensor
+    ctx.Output<Tensor>("Out")->Resize({x_dims[0], 1});
+    ctx.Output<Tensor>("XNorm")->Resize({x_dims[0], 1});
+    ctx.Output<Tensor>("YNorm")->Resize({y_dims[0], 1});
  }
};
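As a quick reference for the new checks, here is a small Python sketch of the shape rule they enforce (editorial illustration only; shapes_ok is a made-up helper, not part of the PaddlePaddle API):

    def shapes_ok(x_shape, y_shape):
        # ranks must be equal and at least 2
        if len(x_shape) != len(y_shape) or len(x_shape) < 2:
            return False
        # all dimensions except the 1st must match
        if x_shape[1:] != y_shape[1:]:
            return False
        # the 1st dimension of Y must equal X's, or be 1 (broadcast)
        return y_shape[0] == x_shape[0] or y_shape[0] == 1

    assert shapes_ok((6, 5, 2), (6, 5, 2))      # same shape
    assert shapes_ok((6, 5, 2), (1, 5, 2))      # Y broadcast along the 1st dim
    assert not shapes_ok((6, 5, 2), (3, 5, 2))  # 1st dims differ and Y's is not 1
    assert not shapes_ok((6,), (6,))            # rank below 2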
@@ -42,16 +56,27 @@ class CosSimOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  CosSimOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("X", "The first input of cos_sim op.");
-    AddInput("Y", "The second input of cos_sim op.");
+    AddInput("X", "The 1st input of cos_sim op.");
+    AddInput("Y", "The 2nd input of cos_sim op.");
    AddOutput("Out", "The output of cos_sim op.");
-    AddOutput("XNorm", "Row norm of the first input.").AsIntermediate();
-    AddOutput("YNorm", "Row norm of the second input.").AsIntermediate();
+    AddOutput("XNorm",
+              "Norm of the first input, reduced along the 1st "
+              "dimension.")
+        .AsIntermediate();
+    AddOutput("YNorm",
+              "Norm of the second input, reduced along the 1st "
+              "dimension.")
+        .AsIntermediate();
    AddComment(R"DOC(
Cosine Similarity Operator.
-The equation is: Out = X^T * Y / (sqrt(X^T * X) * sqrt(Y^T * Y))
+The equation is: Out = X^T * Y / (sqrt(X^T * X) * sqrt(Y^T * Y)).
+
+Input(X) and Input(Y) must have the same shape, except that the 1st dimension
+of Input(Y) could be just 1 (different from Input(X)), which will be
+broadcasted to match the shape of Input(X) before computing their cosine
+similarity.
)DOC");
  }
};
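The DOC string above can be checked against a minimal NumPy sketch (editorial; cos_sim_reference is an illustrative name showing how the equation and broadcasting rule behave, not the operator's actual implementation):

    import numpy as np

    def cos_sim_reference(x, y):
        # flatten everything after the 1st dimension, as the kernel does
        x2 = x.reshape(x.shape[0], -1)
        y2 = y.reshape(y.shape[0], -1)
        x_norm = np.linalg.norm(x2, axis=1, keepdims=True)  # XNorm, shape (N, 1)
        y_norm = np.linalg.norm(y2, axis=1, keepdims=True)  # YNorm, shape (M, 1)
        out = (x2 * y2).sum(axis=1, keepdims=True) / x_norm / y_norm
        return out, x_norm, y_norm

    x = np.random.random((6, 5, 2)).astype("float32")
    y = np.random.random((1, 5, 2)).astype("float32")  # one row, broadcast over X's rows
    out, x_norm, y_norm = cos_sim_reference(x, y)
    assert out.shape == (6, 1)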
@@ -62,32 +87,50 @@ class CosSimOpGrad : public framework::OperatorWithKernel {
 protected:
  void InferShape(const framework::InferShapeContext &ctx) const override {
+    // notnull check
    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null.");
    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Y"), "Input(Y) must not be null.");
    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("XNorm"),
                            "Input(XNorm) must not be null.");
    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("YNorm"),
                            "Input(YNorm) must not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Out"),
+                            "Input(Out) must not be null.");
    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
                            "Input(Out@GRAD) must not be null.");

+    // shape check
    auto x_dims = ctx.Input<Tensor>("X")->dims();
    auto y_dims = ctx.Input<Tensor>("Y")->dims();
    auto xnorm_dims = ctx.Input<Tensor>("XNorm")->dims();
    auto ynorm_dims = ctx.Input<Tensor>("YNorm")->dims();
-    auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
-    PADDLE_ENFORCE_EQ(x_dims, y_dims,
-                      "Dimensions of Input(X) and Input(Y) must be the same.");
-    PADDLE_ENFORCE_EQ(xnorm_dims[0], x_dims[0],
-                      "1st dimension of XNorm must equal that of Input(X).");
-    PADDLE_ENFORCE_EQ(xnorm_dims[1], 1, "2st dimension of XNorm must be one.");
-    PADDLE_ENFORCE_EQ(ynorm_dims[0], y_dims[0],
-                      "1st dimension of YNorm must equal that of Input(Y).");
-    PADDLE_ENFORCE_EQ(ynorm_dims[1], 1, "2st dimension of YNorm must be one.");
-    PADDLE_ENFORCE_EQ(out_dims[0], x_dims[0],
-                      "1st dimension of Out@GRAD must equal that of Input(X)");
-    PADDLE_ENFORCE_EQ(out_dims[1], 1, "1st dimension of Out@GRAD must be one.");
+    auto out_dims = ctx.Input<Tensor>("Out")->dims();
+    auto out_grad_dims =
+        ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
+
+    PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(),
+                      "Ranks of Input(X) and Input(Y) must be equal.");
+    PADDLE_ENFORCE_GE(x_dims.size(), 2,
+                      "Rank of Input(X) must not be less than 2.");
+    PADDLE_ENFORCE_EQ(framework::slice_ddim(x_dims, 1, x_dims.size()),
+                      framework::slice_ddim(y_dims, 1, y_dims.size()),
+                      "All dimensions except the 1st of Input(X) and Input(Y) "
+                      "must be equal.");
+    PADDLE_ENFORCE(x_dims[0] == y_dims[0] || y_dims[0] == 1,
+                   "The 1st dimension of Input(Y) must be equal to Input(X) or"
+                   " just 1 (which will be broadcasted to match Input(X)).");
+    auto target_xnorm_dims = framework::make_ddim({x_dims[0], 1});
+    auto target_ynorm_dims = framework::make_ddim({y_dims[0], 1});
+    PADDLE_ENFORCE_EQ(xnorm_dims, target_xnorm_dims,
+                      "Shape of Input(XNorm) must be [X.Dim(0), 1].");
+    PADDLE_ENFORCE_EQ(ynorm_dims, target_ynorm_dims,
+                      "Shape of Input(YNorm) must be [Y.Dim(0), 1].");
+    PADDLE_ENFORCE_EQ(out_dims, target_xnorm_dims,
+                      "Shape of Input(Out) must be [X.Dim(0), 1].");
+    PADDLE_ENFORCE_EQ(out_grad_dims, target_xnorm_dims,
+                      "Shape of Input(Out@Grad) must be [X.Dim(0), 1].");

+    // resize tensor
    auto *x_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto *y_grad = ctx.Output<Tensor>(framework::GradVarName("Y"));
    if (x_grad) x_grad->Resize(x_dims);
......
@@ -31,30 +31,38 @@ template <typename Place, typename T>
class CosSimKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
-    auto* input_x = context.Input<Tensor>("X");
-    auto* input_y = context.Input<Tensor>("Y");
-    auto* output_z = context.Output<Tensor>("Out");
-    auto* output_x_norm = context.Output<Tensor>("XNorm");
-    auto* output_y_norm = context.Output<Tensor>("YNorm");
-
-    output_z->mutable_data<T>(context.GetPlace());
-    output_x_norm->mutable_data<T>(context.GetPlace());
-    output_y_norm->mutable_data<T>(context.GetPlace());
-
-    auto dims = input_x->dims();
-    int64_t size = input_x->numel();
-    auto new_dims = framework::make_ddim({dims[0], size / dims[0]});
-    auto x = EigenMatrix<T>::From(*input_x, new_dims);
-    auto y = EigenMatrix<T>::From(*input_y, new_dims);
-    auto z = EigenVector<T>::Flatten(*output_z);
-    auto x_norm = EigenVector<T>::Flatten(*output_x_norm);
-    auto y_norm = EigenVector<T>::Flatten(*output_y_norm);
+    // get Tensor
+    auto* in_x = context.Input<Tensor>("X");
+    auto* in_y = context.Input<Tensor>("Y");
+    auto* out_z = context.Output<Tensor>("Out");
+    auto* out_x_norm = context.Output<Tensor>("XNorm");
+    auto* out_y_norm = context.Output<Tensor>("YNorm");
+    out_z->mutable_data<T>(context.GetPlace());
+    out_x_norm->mutable_data<T>(context.GetPlace());
+    out_y_norm->mutable_data<T>(context.GetPlace());

+    // convert Tensor to Eigen Tensor
+    int rows_x = in_x->dims()[0];
+    int rows_y = in_y->dims()[0];
+    auto x = EigenMatrix<T>::Reshape(*in_x, 1);
+    auto y = EigenMatrix<T>::Reshape(*in_y, 1);
+    auto z = EigenVector<T>::Flatten(*out_z);
+    auto x_norm = EigenVector<T>::Flatten(*out_x_norm);
+    auto y_norm = EigenVector<T>::Flatten(*out_y_norm);
+
+    // compute
    auto place = context.GetEigenDevice<Place>();
-    auto xy = (x * y).sum(Eigen::array<int, 1>({{1}}));
-    x_norm.device(place) = x.square().sum(Eigen::array<int, 1>({{1}})).sqrt();
-    y_norm.device(place) = y.square().sum(Eigen::array<int, 1>({{1}})).sqrt();
-    z.device(place) = xy / x_norm / y_norm;
+    auto row_along = Eigen::array<int, 1>({{1}});
+    x_norm.device(place) = x.square().sum(row_along).sqrt();
+    y_norm.device(place) = y.square().sum(row_along).sqrt();
+    if (rows_x == rows_y) {
+      auto xy = (x * y).sum(Eigen::array<int, 1>({1}));
+      z.device(place) = xy / x_norm / y_norm;
+    } else {
+      Eigen::DSizes<int, 2> bcast(rows_x, 1);
+      auto xy = (x * y.broadcast(bcast)).sum(row_along);
+      z.device(place) = xy / x_norm / y_norm.broadcast(bcast);
+    }
  }
};
@@ -62,43 +70,72 @@ template <typename Place, typename T>
class CosSimGradKernel : public framework::OpKernel {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
-    auto* input_x = context.Input<Tensor>("X");
-    auto* input_y = context.Input<Tensor>("Y");
-    auto* input_z = context.Input<Tensor>("Out");
-    auto* input_x_norm = context.Input<Tensor>("XNorm");
-    auto* input_y_norm = context.Input<Tensor>("YNorm");
-    auto* output_grad_x = context.Output<Tensor>(framework::GradVarName("X"));
-    auto* output_grad_y = context.Output<Tensor>(framework::GradVarName("Y"));
-    auto* input_grad_z = context.Input<Tensor>(framework::GradVarName("Out"));
-
-    auto dims = input_x->dims();
-    int64_t size = input_x->numel();
-    auto new_dims = framework::make_ddim({dims[0], size / dims[0]});
-    auto x = EigenMatrix<T>::From(*input_x, new_dims);
-    auto y = EigenMatrix<T>::From(*input_y, new_dims);
-    auto z = EigenMatrix<T>::From(*input_z);
-    auto x_norm = EigenMatrix<T>::From(*input_x_norm);
-    auto y_norm = EigenMatrix<T>::From(*input_y_norm);
-    auto dz = EigenMatrix<T>::From(*input_grad_z);
-
-    Eigen::DSizes<int, 2> bcast(1, new_dims[1]);
-    auto z_bcast = z.broadcast(bcast);
-    auto dz_bcast = dz.broadcast(bcast);
+    // get Tensor
+    auto* in_x = context.Input<Tensor>("X");
+    auto* in_y = context.Input<Tensor>("Y");
+    auto* in_z = context.Input<Tensor>("Out");
+    auto* in_x_norm = context.Input<Tensor>("XNorm");
+    auto* in_y_norm = context.Input<Tensor>("YNorm");
+    auto* out_grad_x = context.Output<Tensor>(framework::GradVarName("X"));
+    auto* out_grad_y = context.Output<Tensor>(framework::GradVarName("Y"));
+    auto* in_grad_z = context.Input<Tensor>(framework::GradVarName("Out"));
+
+    // convert Tensor to Eigen Tensor
+    auto x = EigenMatrix<T>::Reshape(*in_x, 1);
+    auto y = EigenMatrix<T>::Reshape(*in_y, 1);
+    auto z = EigenMatrix<T>::Reshape(*in_z, 1);
+    auto x_norm = EigenMatrix<T>::Reshape(*in_x_norm, 1);
+    auto y_norm = EigenMatrix<T>::Reshape(*in_y_norm, 1);
+    auto dz = EigenMatrix<T>::Reshape(*in_grad_z, 1);
+
+    // compute gradident
+    int rows_x = in_x->dims()[0];
+    int rows_y = in_y->dims()[0];
+    int cols = framework::product(in_x->dims()) / rows_x;
+    Eigen::DSizes<int, 2> bcast_cols(1, cols);
+    auto z_bcast = z.broadcast(bcast_cols);
+    auto dz_bcast = dz.broadcast(bcast_cols);
+    auto x_snorm_bcast = x_norm.square().eval().broadcast(bcast_cols);
    auto place = context.GetEigenDevice<Place>();
-    auto x_snorm_bcast = x_norm.square().eval().broadcast(bcast);
-    auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast);
-    auto norm_prod_bcast = (x_norm * y_norm).eval().broadcast(bcast);
-    if (output_grad_x) {
-      output_grad_x->mutable_data<T>(context.GetPlace());
-      auto dx = EigenMatrix<T>::From(*output_grad_x, new_dims);
-      dx.device(place) =
-          dz_bcast * (y / norm_prod_bcast - z_bcast * x / x_snorm_bcast);
-    }
-    if (output_grad_y) {
-      output_grad_y->mutable_data<T>(context.GetPlace());
-      auto dy = EigenMatrix<T>::From(*output_grad_y, new_dims);
-      dy.device(place) =
-          dz_bcast * (x / norm_prod_bcast - z_bcast * y / y_snorm_bcast);
+    if (rows_x == rows_y) {
+      auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast_cols);
+      auto norm_prod_bcast = (x_norm * y_norm).eval().broadcast(bcast_cols);
+      // compute dx
+      if (out_grad_x) {
+        out_grad_x->mutable_data<T>(context.GetPlace());
+        auto dx = EigenMatrix<T>::Reshape(*out_grad_x, 1);
+        auto grad = y / norm_prod_bcast - z_bcast * x / x_snorm_bcast;
+        dx.device(place) = dz_bcast * grad;
+      }
+      // compute dy
+      if (out_grad_y) {
+        out_grad_y->mutable_data<T>(context.GetPlace());
+        auto dy = EigenMatrix<T>::Reshape(*out_grad_y, 1);
+        auto grad = x / norm_prod_bcast - z_bcast * y / y_snorm_bcast;
+        dy.device(place) = dz_bcast * grad;
+      }
+    } else {
+      Eigen::DSizes<int, 2> bcast_rows(rows_x, 1);
+      Eigen::DSizes<int, 2> bcast_rows_cols(rows_x, cols);
+      auto y_bcast = y.broadcast(bcast_rows);
+      auto y_snorm_bcast = y_norm.square().eval().broadcast(bcast_rows_cols);
+      auto norm_prod_bcast = (x_norm * y_norm.eval().broadcast(bcast_rows))
+                                 .eval()
+                                 .broadcast(bcast_cols);
+      // compute dx
+      if (out_grad_x) {
+        out_grad_x->mutable_data<T>(context.GetPlace());
+        auto dx = EigenMatrix<T>::Reshape(*out_grad_x, 1);
+        auto grad = y_bcast / norm_prod_bcast - z_bcast * x / x_snorm_bcast;
+        dx.device(place) = dz_bcast * grad;
+      }
+      // compute dy
+      if (out_grad_y) {
+        out_grad_y->mutable_data<T>(context.GetPlace());
+        auto dy = EigenMatrix<T>::Reshape(*out_grad_y, 1);
+        auto grad = x / norm_prod_bcast - z_bcast * y_bcast / y_snorm_bcast;
+        dy.device(place) = (dz_bcast * grad).sum(Eigen::array<int, 1>({0}));
+      }
    }
  }
};
......
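For readers of the gradient kernel above, the per-row formulas it evaluates can be written in the same informal notation as the operator's DOC string (this summary is editorial, not part of the commit). With Out = X * Y / (XNorm * YNorm) taken row by row, and dOut the incoming gradient:

    dX = dOut * (Y / (XNorm * YNorm) - Out * X / XNorm^2)
    dY = dOut * (X / (XNorm * YNorm) - Out * Y / YNorm^2)

where the norms are broadcast along the columns of each row. When Input(Y) has a single row that is broadcast over all rows of Input(X), its gradient additionally sums the per-row contributions over the row dimension, which is what the final .sum(Eigen::array<int, 1>({0})) in the else branch does.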
@@ -29,7 +29,7 @@ class OnehotCrossEntropyOp : public framework::OperatorWithKernel {
    PADDLE_ENFORCE_EQ(X->dims().size(), 2, "X's dimension must be 2.");
    PADDLE_ENFORCE_EQ(label->dims().size(), 1, "label's dimension must be 1.");
    PADDLE_ENFORCE_EQ(X->dims()[0], label->dims()[0]);
-    ctx.Output<Tensor>("Y")->Resize({X->dims()[0]});
+    ctx.Output<Tensor>("Y")->Resize({X->dims()[0], 1});
  }
};
......
@@ -7,8 +7,8 @@ class TestCosSimOp(OpTest):
    def setUp(self):
        self.op_type = "cos_sim"
        self.inputs = {
-            'X': np.random.random((10, 5)).astype("float32"),
-            'Y': np.random.random((10, 5)).astype("float32")
+            'X': np.random.random((6, 5)).astype("float32"),
+            'Y': np.random.random((6, 5)).astype("float32")
        }
        expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1)
        expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1)
@@ -28,12 +28,66 @@ class TestCosSimOp(OpTest):
    def test_check_grad_ingore_x(self):
        self.check_grad(
-            ['Y'], 'Out', max_relative_error=0.05, no_grad_set=set('X'))
+            ['Y'], 'Out', max_relative_error=0.05, no_grad_set=set("X"))

-    def test_check_grad_ignore_y(self):
+    def test_check_grad_ingore_y(self):
        self.check_grad(
            ['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Y'))


-if __name__ == "__main__":
+class TestCosSimOp2(TestCosSimOp):
+    def setUp(self):
+        self.op_type = "cos_sim"
+        self.inputs = {
+            'X': np.random.random((6, 5)).astype("float32"),
+            'Y': np.random.random((1, 5)).astype("float32")
+        }
+        expect_x_norm = np.linalg.norm(self.inputs['X'], axis=1)
+        expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=1)
+        expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=1) / \
+            expect_x_norm / expect_y_norm
+        self.outputs = {
+            'XNorm': np.expand_dims(expect_x_norm, 1),
+            'YNorm': np.expand_dims(expect_y_norm, 1),
+            'Out': np.expand_dims(expect_out, 1)
+        }
+
+
+class TestCosSimOp3(TestCosSimOp):
+    def setUp(self):
+        self.op_type = "cos_sim"
+        self.inputs = {
+            'X': np.random.random((6, 5, 2)).astype("float32"),
+            'Y': np.random.random((6, 5, 2)).astype("float32")
+        }
+        expect_x_norm = np.linalg.norm(self.inputs['X'], axis=(1, 2))
+        expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=(1, 2))
+        expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=(1, 2)) / \
+            expect_x_norm / expect_y_norm
+        self.outputs = {
+            'XNorm': np.expand_dims(expect_x_norm, 1),
+            'YNorm': np.expand_dims(expect_y_norm, 1),
+            'Out': np.expand_dims(expect_out, 1)
+        }
+
+
+class TestCosSimOp4(TestCosSimOp):
+    def setUp(self):
+        self.op_type = "cos_sim"
+        self.inputs = {
+            'X': np.random.random((6, 5, 2)).astype("float32"),
+            'Y': np.random.random((1, 5, 2)).astype("float32")
+        }
+        expect_x_norm = np.linalg.norm(self.inputs['X'], axis=(1, 2))
+        expect_y_norm = np.linalg.norm(self.inputs['Y'], axis=(1, 2))
+        expect_out = (self.inputs['X'] * self.inputs['Y']).sum(axis=(1, 2)) / \
+            expect_x_norm / expect_y_norm
+        self.outputs = {
+            'XNorm': np.expand_dims(expect_x_norm, 1),
+            'YNorm': np.expand_dims(expect_y_norm, 1),
+            'Out': np.expand_dims(expect_out, 1)
+        }
+
+
+if __name__ == '__main__':
    unittest.main()
@@ -8,20 +8,22 @@ class TestCrossEntropy(OpTest):
        self.op_type = "onehot_cross_entropy"
        batch_size = 30
        class_num = 10
+
        X = numpy.random.uniform(0.1, 1.0,
                                 [batch_size, class_num]).astype("float32")
-        label = (class_num / 2) * numpy.ones(batch_size).astype("int32")
-        self.inputs = {'X': X, 'label': label}
-        Y = []
-        for i in range(0, batch_size):
-            Y.append(-numpy.log(X[i][label[i]]))
-        self.outputs = {'Y': numpy.array(Y).astype("float32")}
+        labels = numpy.random.randint(0, class_num, batch_size, dtype="int32")
+
+        cross_entropy = numpy.asmatrix(
+            [[-numpy.log(X[i][labels[i]])] for i in range(X.shape[0])],
+            dtype="float32")
+        self.inputs = {"X": X, "label": labels}
+        self.outputs = {"Y": cross_entropy}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
-        self.check_grad(['X'], 'Y')
+        self.check_grad(["X"], "Y")


if __name__ == "__main__":
......
@@ -22,7 +22,7 @@ class TestPadOp(OpTest):
        self.check_output()

    def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', max_relative_error=0.006)

    def initTestCase(self):
        self.shape = (16, 16)
......