diff --git a/paddle/fluid/operators/concat_op.cc b/paddle/fluid/operators/concat_op.cc
index 9eba127a9b3ceace225e3d3dcf867df518c4477e..68a4d09f3b92dcaa81390014640ddfa1afeb31dc 100644
--- a/paddle/fluid/operators/concat_op.cc
+++ b/paddle/fluid/operators/concat_op.cc
@@ -253,19 +253,7 @@ REGISTER_OPERATOR(concat_grad, ops::ConcatOpGrad,
                   ops::ConcatDoubleGradOpMaker<paddle::framework::OpDesc>,
                   ops::ConcatDoubleGradOpMaker<paddle::imperative::OpBase>,
                   ops::ConcatOpGradNoNeedBufferVarInferer);
-REGISTER_OP_CPU_KERNEL(
-    concat, ops::ConcatKernel<paddle::platform::CPUDeviceContext, double>,
-    ops::ConcatKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::ConcatKernel<paddle::platform::CPUDeviceContext, bool>,
-    ops::ConcatKernel<paddle::platform::CPUDeviceContext, int64_t>,
-    ops::ConcatKernel<paddle::platform::CPUDeviceContext,
-                      paddle::platform::float16>,
-    ops::ConcatKernel<paddle::platform::CPUDeviceContext, int>,
-    ops::ConcatKernel<paddle::platform::CPUDeviceContext, uint8_t>,
-    ops::ConcatKernel<paddle::platform::CPUDeviceContext,
-                      paddle::platform::complex<float>>,
-    ops::ConcatKernel<paddle::platform::CPUDeviceContext,
-                      paddle::platform::complex<double>>);
+
 REGISTER_OP_CPU_KERNEL(
     concat_grad,
     ops::ConcatGradKernel<paddle::platform::CPUDeviceContext, double>,
diff --git a/paddle/fluid/operators/concat_op.cu.cc b/paddle/fluid/operators/concat_op.cu.cc
index 2be763298575490932f6cd3867dd59cee0ebeba7..3aee2c6504fdd7bf2a803f8757a974728aa262dd 100644
--- a/paddle/fluid/operators/concat_op.cu.cc
+++ b/paddle/fluid/operators/concat_op.cu.cc
@@ -18,18 +18,7 @@ limitations under the License. */
 namespace ops = paddle::operators;
 namespace plat = paddle::platform;
 
-REGISTER_OP_CUDA_KERNEL(
-    concat, ops::ConcatKernel<paddle::platform::CUDADeviceContext, double>,
-    ops::ConcatKernel<paddle::platform::CUDADeviceContext, float>,
-    ops::ConcatKernel<paddle::platform::CUDADeviceContext, bool>,
-    ops::ConcatKernel<paddle::platform::CUDADeviceContext, plat::float16>,
-    ops::ConcatKernel<paddle::platform::CUDADeviceContext, int64_t>,
-    ops::ConcatKernel<paddle::platform::CUDADeviceContext, int>,
-    ops::ConcatKernel<paddle::platform::CUDADeviceContext, uint8_t>,
-    ops::ConcatKernel<paddle::platform::CUDADeviceContext,
-                      plat::complex<float>>,
-    ops::ConcatKernel<paddle::platform::CUDADeviceContext,
-                      plat::complex<double>>);
+
 REGISTER_OP_CUDA_KERNEL(
     concat_grad,
     ops::ConcatGradKernel<paddle::platform::CUDADeviceContext, double>,
diff --git a/paddle/fluid/operators/concat_op.h b/paddle/fluid/operators/concat_op.h
index 1d9c10bdb8cc6a698a4a1b6ab376e90b67eb2a03..7b53b9df6f95134f3aaafa7c34bef71eaf805d3c 100644
--- a/paddle/fluid/operators/concat_op.h
+++ b/paddle/fluid/operators/concat_op.h
@@ -39,54 +39,6 @@ static inline int64_t ComputeAxis(int64_t axis, int64_t rank) {
   }
   return axis > 0 ? axis : 0;
 }
-
-template <typename DeviceContext, typename T>
-class ConcatKernel : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
-    auto ins = ctx.MultiInput<framework::LoDTensor>("X");
-    framework::LoDTensor* out = ctx.Output<framework::LoDTensor>("Out");
-    PADDLE_ENFORCE_NOT_NULL(ins[0],
-                            platform::errors::NotFound(
-                                "The first input tensor is not initalized."));
-    auto axis = ctx.Attr<int>("axis");
-    bool need_resize_out_dims = false;
-    if (ctx.HasInput("AxisTensor")) {
-      auto* axis_tensor = ctx.Input<framework::Tensor>("AxisTensor");
-      axis = GetDataFromTensor<int>(axis_tensor)[0];
-      need_resize_out_dims = true;
-    }
-    axis = ComputeAxis(static_cast<int64_t>(axis),
-                       static_cast<int64_t>(ins[0]->dims().size()));
-
-    if (need_resize_out_dims) {
-      const size_t n = ins.size();
-      std::vector<framework::DDim> ins_dims(n);
-      for (size_t i = 0; i < n; i++) {
-        ins_dims[i] = ins[i]->dims();
-      }
-
-      framework::DDim out_dims =
-          pten::funcs::ComputeAndCheckShape(true, ins_dims, axis);
-      out->Resize(out_dims);
-    }
-    auto place = ctx.GetPlace();
-    out->mutable_data<T>(place);
-
-    // call new kernel
-    auto& dev_ctx = ctx.device_context<DeviceContext>();
-    std::vector<pten::DenseTensor> pt_ins;
-    for (auto& in : ins) {
-      pt_ins.push_back(*in);
-    }
-
-    pten::ConcatKernel<T>(
-        static_cast<const typename framework::ConvertToPtenContext<
-            DeviceContext>::TYPE&>(dev_ctx),
-        pt_ins, axis, out);
-  }
-};
-
 template <typename DeviceContext, typename T>
 class ConcatGradKernel : public framework::OpKernel<T> {
  public:
diff --git a/paddle/fluid/operators/tensor_array_to_tensor_op.cc b/paddle/fluid/operators/tensor_array_to_tensor_op.cc
index eb20e1c2cd2748a5ab4db28df0c4798837c7bf21..fa49f254d972a38ad54922c8a303654dedc36682 100644
--- a/paddle/fluid/operators/tensor_array_to_tensor_op.cc
+++ b/paddle/fluid/operators/tensor_array_to_tensor_op.cc
@@ -299,7 +299,7 @@ class TensorArrayToTensorGradOpMaker : public framework::SingleGradOpMaker {
 }  // namespace operators
 }  // namespace paddle
 
-USE_OP(concat);
+USE_OP_ITSELF(concat);
 
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(
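
Note on the `USE_OP(concat)` → `USE_OP_ITSELF(concat)` change: `tensor_array_to_tensor_op.cc` only needs the `concat` operator definition, not its fluid kernels, and the fluid kernel registrations are exactly what this diff deletes (the compute now goes through `pten::ConcatKernel`, as the removed bridge kernel in `concat_op.h` shows). A minimal sketch of the macro distinction, assuming the usual layering in `paddle/fluid/framework/op_registry.h` (symbol names simplified, not the verbatim expansions):

```cpp
// Sketch only. Both macros emit references to "touch" symbols so the
// linker keeps the matching registration objects alive.

// USE_OP_ITSELF(concat) depends only on the operator registration:
//   extern int TouchOpRegistrar_concat();
//   static int use_op_itself_concat = TouchOpRegistrar_concat();

// USE_OP(concat) additionally depends on the CPU kernel registrar:
//   USE_OP_ITSELF(concat);
//   USE_OP_DEVICE_KERNEL(concat, CPU);  // touches the CPU kernel symbol

// With REGISTER_OP_CPU_KERNEL(concat, ...) deleted in this diff, the
// fluid kernel registrar symbol no longer exists, so USE_OP(concat)
// would fail to link; USE_OP_ITSELF(concat) keeps only the dependency
// that is still valid.
USE_OP_ITSELF(concat);
```

The same reasoning applies to any other translation unit that previously declared `USE_OP(concat)`: once an op's kernels migrate out of fluid, call sites must weaken their dependency to the operator itself.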