Commit d6a0280e authored by dangqingqing

Enhance unit testing framework for operator with LoDTensor.

Parent 40fba4aa
@@ -60,7 +60,9 @@ class SequenceAvgPoolGradOp : public framework::OperatorWithKernel {
  protected:
   void InferShape(const framework::InferShapeContext& ctx) const override {
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
-                            "Gradient of Out should not be null");
+                            "Gradient of Out should not be null.");
+    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"),
+                            "The input X should not be null.");
     auto og_dims =
         ctx.Input<framework::LoDTensor>(framework::GradVarName("Out"))->dims();
     auto x_dims = ctx.Input<framework::LoDTensor>("X")->dims();
@@ -21,6 +21,9 @@ namespace operators {
 using Tensor = framework::Tensor;
 using LoDTensor = framework::LoDTensor;
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
 template <typename T, int MajorType = Eigen::RowMajor,
           typename IndexType = Eigen::DenseIndex>
 using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
@@ -43,8 +46,8 @@ class SequenceAvgPoolKernel : public framework::OpKernel {
                                  static_cast<int>(lod[0][i + 1]));
       Tensor out_t = out->Slice<T>(i, i + 1);
       int64_t h = static_cast<int64_t>(lod[0][i + 1] - lod[0][i]);
-      auto in_e = EigenMatrix<T>::From(in_t, {h, w});
-      auto out_e = EigenMatrix<T>::From(out_t, {h, w});
+      auto in_e = EigenMatrix<T>::From(in_t, framework::make_ddim({h, w}));
+      auto out_e = EigenVector<T>::Flatten(out_t);
       out_e.device(place) = in_e.mean(Eigen::array<int, 1>({{0}}));
     }
   }
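Note on this hunk: `out_t` is a single row of the output ({1, w}), and `mean` over dimension 0 of the {h, w} segment yields a rank-1 tensor of size w, so flattening `out_t` to an EigenVector matches the result's rank; the old {h, w} reshape of a {1, w} slice was incorrect. For reference, a minimal numpy sketch of what the forward kernel computes (illustration only, not part of this commit; names are mine):

import numpy as np

def seq_avg_pool_forward(x, lod):
    # lod holds segment offsets, e.g. [0, 4, 5, 8] -> three sequences
    # of lengths 4, 1 and 3; output row i is the mean of segment i.
    out = np.zeros((len(lod) - 1, x.shape[1]), dtype=x.dtype)
    for i in range(len(lod) - 1):
        out[i] = x[lod[i]:lod[i + 1]].mean(axis=0)
    return out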
@@ -54,9 +57,9 @@ template <typename Place, typename T>
 class SequenceAvgPoolGradKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* in = context.Output<LoDTensor>("X");
-    auto* in_g = context.Output<LoDTensor>(framework::GradVarName("X"));
+    auto* in = context.Input<LoDTensor>("X");
     auto* out_g = context.Input<LoDTensor>(framework::GradVarName("Out"));
+    auto* in_g = context.Output<LoDTensor>(framework::GradVarName("X"));
     auto dims = in->dims();
     auto lod = in->lod();
@@ -71,7 +74,7 @@ class SequenceAvgPoolGradKernel : public framework::OpKernel {
       int64_t h = static_cast<int64_t>(lod[0][i + 1] - lod[0][i]);
       auto in_g_e = EigenMatrix<T>::From(in_g_t, {h, w});
       auto out_g_e = EigenMatrix<T>::From(out_g_t, {1, w});
-      Eigen::DSizes<int, 2> bcast(h, w);
+      Eigen::DSizes<int, 2> bcast(h, 1);
       in_g_e.device(place) = (out_g_e / static_cast<T>(h)).broadcast(bcast);
     }
   }
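Note on this hunk: Eigen's `broadcast` takes per-dimension repeat counts, not a target shape. Since `out_g_e` is {1, w}, the factors (h, 1) tile the single row h times to produce {h, w}; the old (h, w) would have produced {h, w*w} and mismatched `in_g_e`. A numpy sketch of the corrected backward pass (illustration only; names are mine):

import numpy as np

def seq_avg_pool_backward(d_out, lod):
    # Each input row in segment i receives d_out[i] / h, where h is the
    # segment length -- the numpy analogue of tiling a {1, w} row
    # h times along dimension 0.
    w = d_out.shape[1]
    d_x = np.zeros((lod[-1], w), dtype=d_out.dtype)
    for i in range(len(lod) - 1):
        h = lod[i + 1] - lod[i]
        d_x[lod[i]:lod[i + 1]] = np.broadcast_to(d_out[i] / h, (h, w))
    return d_x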
@@ -47,17 +47,24 @@ def set_input(scope, op, inputs, place):
         if in_name in inputs:
             if in_dup:
                 sub_in = inputs[in_name]
-                for sub_in_name, sub_in_array in sub_in:
+                for sub_in_name, sub_in_val in sub_in:
                     var = scope.find_var(sub_in_name)
                     tensor = var.get_tensor()
+                    sub_in_array = sub_in_val[0] \
+                        if isinstance(sub_in_val, tuple) else sub_in_val
                     tensor.set_dims(sub_in_array.shape)
                     tensor.set(sub_in_array, place)
+                    if isinstance(sub_in_val, tuple):
+                        tensor.set_lod(sub_in_val[1])
             else:
                 var = scope.find_var(in_name)
                 tensor = var.get_tensor()
-                arr = inputs[in_name]
-                tensor.set_dims(arr.shape)
-                tensor.set(arr, place)
+                in_val = inputs[in_name]
+                in_array = in_val[0] if isinstance(in_val, tuple) else in_val
+                tensor.set_dims(in_array.shape)
+                tensor.set(in_array, place)
+                if isinstance(in_val, tuple):
+                    tensor.set_lod(in_val[1])


 def set_output_grad(scope, op, outputs, place):
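Note on this hunk: with this change a test can feed a LoDTensor input as an (ndarray, lod) tuple, and `set_input` unpacks it and calls `tensor.set_lod`; a plain ndarray still works for dense inputs. A sketch of the new convention (hypothetical test values, not part of this commit):

import numpy as np

# 'X' is a LoDTensor input: one-level LoD [0, 4, 5, 8] marks 3 sequences
# of lengths 4, 1 and 3 within the 8 rows of x.
lod = [[0, 4, 5, 8]]
x = np.random.random((8, 16)).astype("float32")
inputs = {'X': (x, lod)}  # set_input() will call tensor.set_lod(lod)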