Commit 477a6a09 authored by guosheng

Refine reduce_op, follow comments and remove ReduceGradEigenFreeKernel

Parent 1295e5ef
...@@ -18,7 +18,6 @@ namespace paddle { ...@@ -18,7 +18,6 @@ namespace paddle {
namespace operators { namespace operators {
using framework::Tensor; using framework::Tensor;
using framework::LoDTensor;
class ReduceOp : public framework::OperatorWithKernel { class ReduceOp : public framework::OperatorWithKernel {
public: public:
...@@ -46,7 +45,11 @@ class ReduceOp : public framework::OperatorWithKernel { ...@@ -46,7 +45,11 @@ class ReduceOp : public framework::OperatorWithKernel {
dims_vector.erase(dims_vector.begin() + dim); dims_vector.erase(dims_vector.begin() + dim);
} }
auto out_dims = framework::make_ddim(dims_vector); auto out_dims = framework::make_ddim(dims_vector);
ctx.Output<framework::LoDTensor>("Out")->Resize(out_dims); ctx.Output<framework::Tensor>("Out")->Resize(out_dims);
if (dim != 0) {
// Only pass LoD when not reducing on the first dim
ctx.ShareLoD("X", /*->*/ "Out");
}
} }
}; };
...@@ -81,9 +84,12 @@ class ReduceOpMaker : public framework::OpProtoAndCheckerMaker { ...@@ -81,9 +84,12 @@ class ReduceOpMaker : public framework::OpProtoAndCheckerMaker {
"X", "X",
"(Tensor) The input tensor. Tensors with rank at most 6 are supported"); "(Tensor) The input tensor. Tensors with rank at most 6 are supported");
AddOutput("Out", "(Tensor) The result tensor."); AddOutput("Out", "(Tensor) The result tensor.");
AddAttr<int>("dim", AddAttr<int>(
"(int, default 0) The dimension to reduce. " "dim",
"Must be in the range [-rank(input), rank(input))") "(int, default 1) The dimension to reduce. "
"Must be in the range [-rank(input), rank(input)). "
"If `dim < 0`, the dim to reduce is `rank + dim`. "
"Noting that reducing on the first dim will make the LoD info lost.")
.SetDefault(0); .SetDefault(0);
AddAttr<bool>("keep_dim", AddAttr<bool>("keep_dim",
"(bool, default false) " "(bool, default false) "
......
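The ShareLoD change above ties LoD propagation to the reduced dimension: LoD describes the sequence layout along the first axis, so reducing over dim 0 invalidates it, while reducing over any other axis leaves it intact. Below is a minimal standalone sketch of the documented `dim` semantics and the sharing condition; it is plain C++ with hypothetical helper names (`NormalizeReduceDim`, `ShouldShareLoD`), not Paddle's API.

#include <cassert>
#include <iostream>

// Normalize the `dim` attribute as documented: a negative dim counts from
// the back, i.e. the dim to reduce is `rank + dim`.
int NormalizeReduceDim(int dim, int rank) {
  assert(dim >= -rank && dim < rank && "dim must be in [-rank(input), rank(input))");
  return dim < 0 ? rank + dim : dim;
}

// Mirror of the condition added in ReduceOp: only propagate LoD when the
// reduction is not over the first (LoD-carrying) dimension.
bool ShouldShareLoD(int dim) { return dim != 0; }

int main() {
  const int rank = 3;
  std::cout << NormalizeReduceDim(-1, rank) << "\n";                // 2 (last dim)
  std::cout << std::boolalpha << ShouldShareLoD(0) << "\n";         // false
  std::cout << std::boolalpha << ShouldShareLoD(2) << "\n";         // true
}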
@@ -80,6 +80,8 @@ struct MaxOrMinGradFunctor {
     auto equals = x == y.broadcast(dim);
     auto ones = dx.constant(1);
     auto zeros = dx.constant(0);
+    // If there are multiple minimum or maximum elements, the subgradient of
+    // each is the set [0, 1], and we pass gradient to all of them here.
     dx.device(place) = dy.broadcast(dim) * equals.select(ones, zeros);
   }
 };
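The new comment documents why ties are not broken in the backward pass: every input element equal to the reduced max/min has a subgradient in [0, 1], and `equals.select(ones, zeros)` forwards the full upstream gradient to all of them. A minimal numeric sketch of that behavior in plain C++ (not the Eigen expression itself): for x = {1, 3, 3}, max(x) = 3 is attained twice, and both tied elements receive the whole of dy.

#include <iostream>
#include <vector>

int main() {
  std::vector<float> x = {1.f, 3.f, 3.f};
  const float y = 3.f;   // reduced (max) value
  const float dy = 2.f;  // upstream gradient for the reduced value
  for (float xi : x) {
    // equals.select(ones, zeros): 1 where xi matches the max, else 0.
    float dx = (xi == y) ? dy * 1.f : dy * 0.f;
    std::cout << dx << " ";  // prints: 0 2 2
  }
  std::cout << "\n";
}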
...
@@ -145,35 +147,34 @@ class ReduceGradKernel : public framework::OpKernel {
     int rank = context.Input<Tensor>("X")->dims().size();
     switch (rank) {
       case 1:
-        ReduceCompute<1>(context);
+        ReduceGradCompute<1>(context);
         break;
       case 2:
-        ReduceCompute<2>(context);
+        ReduceGradCompute<2>(context);
         break;
       case 3:
-        ReduceCompute<3>(context);
+        ReduceGradCompute<3>(context);
         break;
       case 4:
-        ReduceCompute<4>(context);
+        ReduceGradCompute<4>(context);
         break;
       case 5:
-        ReduceCompute<5>(context);
+        ReduceGradCompute<5>(context);
         break;
       case 6:
-        ReduceCompute<6>(context);
+        ReduceGradCompute<6>(context);
         break;
     }
   }
 
  private:
   template <size_t D>
-  void ReduceCompute(const framework::ExecutionContext& context) const {
+  void ReduceGradCompute(const framework::ExecutionContext& context) const {
     auto* input0 = context.Input<Tensor>("X");
     auto* input1 = context.Input<Tensor>("Out");
     auto* input2 = context.Input<Tensor>(framework::GradVarName("Out"));
     auto* output = context.Output<Tensor>(framework::GradVarName("X"));
-    if (output != nullptr) {
     output->mutable_data<T>(context.GetPlace());
     auto x = EigenTensor<T, D>::From(*input0);
     auto x_grad = EigenTensor<T, D>::From(*output);
...
@@ -193,55 +194,6 @@ class ReduceGradKernel : public framework::OpKernel {
       functor(place, x, x_reduce, x_grad, x_reduce_grad, braodcast_dim,
               braodcast_dim[dim]);
     }
-  }
-};
-
-// For EigenTensor unsupported reduce
-template <typename T, typename Functor>
-class ReduceGradEigenFreeKernel : public framework::OpKernel {
- public:
-  void Compute(const framework::ExecutionContext& context) const override {
-    auto* x = context.Input<Tensor>("X");
-    auto* out = context.Input<Tensor>("Out");
-    auto* x_grad = context.Output<Tensor>(framework::GradVarName("X"));
-    auto* out_grad = context.Input<Tensor>(framework::GradVarName("Out"));
-    if (x_grad != nullptr) {
-      DDim dims = x->dims();
-      int rank = dims.size();
-      int dim = static_cast<int>(context.Attr<int>("dim"));
-      if (dim < 0) dim = rank + dim;
-      auto* x_data = x->data<T>();
-      auto* x_grad_data = x_grad->mutable_data<T>(context.GetPlace());
-      auto* out_data = out->data<T>();
-      auto* out_grad_data = out_grad->data<T>();
-      int outer_count = 1;
-      int inner_count = 1;
-      int mid_count = dims[dim];
-      for (int i = 0; i < dim; ++i) {
-        outer_count *= dims[i];
-      }
-      for (int i = dim + 1; i < rank; ++i) {
-        inner_count *= dims[i];
-      }
-      int x_offset = 0;    // offset on raw data
-      int out_offset = 0;  // offset on reduced data
-      Functor functor;
-      for (int i = 0; i < outer_count; ++i) {
-        for (int j = 0; j < inner_count; ++j) {
-          out_offset = inner_count * i + j;
-          for (int k = 0; k < mid_count; ++k) {
-            x_offset = (inner_count * mid_count) * i + inner_count * k + j;
-            functor(x_data + x_offset, out_data + out_offset,
-                    x_grad_data + x_offset, out_grad_data + out_offset,
-                    mid_count);
-          }
-        }
-      }
-    }
-  }
 };
 
 }  // namespace operators
...
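For reference, the deleted ReduceGradEigenFreeKernel addressed elements by flattening the tensor around the reduced axis into [outer, mid, inner] blocks, with `x_offset = (inner_count * mid_count) * i + inner_count * k + j` on the raw data and `out_offset = inner_count * i + j` on the reduced data. A small standalone sketch (an assumed 2x3x2 shape reduced over dim 1, so outer = 2, mid = 3, inner = 2) showing that this enumeration visits every input offset in [0, 12) exactly once:

#include <iostream>

int main() {
  const int outer = 2, mid = 3, inner = 2;
  for (int i = 0; i < outer; ++i) {
    for (int j = 0; j < inner; ++j) {
      int out_offset = inner * i + j;  // offset on reduced data
      for (int k = 0; k < mid; ++k) {
        // offset on raw data; each of the 12 input elements appears once
        int x_offset = (inner * mid) * i + inner * k + j;
        std::cout << "x[" << x_offset << "] -> out[" << out_offset << "]\n";
      }
    }
  }
}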