From a2387ef2e5df0b463b4a435bce31e212c5d51202 Mon Sep 17 00:00:00 2001
From: TTerror
Date: Mon, 12 Apr 2021 09:26:07 +0800
Subject: [PATCH] fix concat_grad on kunlun (#32151)

* fix concat_grad on kunlun

* fix concat_grad on kunlun
---
 cmake/external/xpu.cmake                |  2 +-
 paddle/fluid/operators/concat_op_xpu.cc | 19 ++++++-------------
 2 files changed, 7 insertions(+), 14 deletions(-)

diff --git a/cmake/external/xpu.cmake b/cmake/external/xpu.cmake
index 16c69a7b503..f846623602e 100644
--- a/cmake/external/xpu.cmake
+++ b/cmake/external/xpu.cmake
@@ -13,7 +13,7 @@ if(NOT XPU_SDK_ROOT)
   elseif(WITH_SUNWAY)
     SET(XPU_URL "https://baidu-kunlun-public.su.bcebos.com/paddle_depence/sunway/xpu_2021_01_13.tar.gz" CACHE STRING "" FORCE)
   else()
-    SET(XPU_URL "https://baidu-kunlun-public.su.bcebos.com/paddle_depence/xpu_2021_03_30.tar.gz" CACHE STRING "" FORCE)
+    SET(XPU_URL "https://baidu-kunlun-public.su.bcebos.com/paddle_depence/xpu_2021_04_09.tar.gz" CACHE STRING "" FORCE)
   endif()

   SET(XPU_SOURCE_DIR "${THIRD_PARTY_PATH}/xpu")
diff --git a/paddle/fluid/operators/concat_op_xpu.cc b/paddle/fluid/operators/concat_op_xpu.cc
index aa0002cc6d1..be299babdba 100644
--- a/paddle/fluid/operators/concat_op_xpu.cc
+++ b/paddle/fluid/operators/concat_op_xpu.cc
@@ -132,16 +132,14 @@ class ConcatGradXPUKernel : public framework::OpKernel<T> {
     axis = ComputeAxis(static_cast<int64_t>(axis),
                        static_cast<int64_t>(ins[0]->dims().size()));
     // get output tensor that the name is not kEmptyVarName
-    std::vector<framework::Tensor*> outputs;
-    std::vector<int> choose_idx;
-    int n = 0;
+    std::vector<T*> ptrs(outs.size());
     for (size_t j = 0; j < outs.size(); ++j) {
       if (out_var_names[j] != framework::kEmptyVarName &&
           outs[j]->numel() != 0UL) {
         outs[j]->mutable_data<T>(ctx.GetPlace());
-        outputs.push_back(outs[j]);
-        choose_idx.push_back(j);
-        n++;
+        ptrs[j] = outs[j]->data<T>();
+      } else {
+        ptrs[j] = nullptr;
       }
     }
     PADDLE_ENFORCE_GE(axis, 0, platform::errors::InvalidArgument(
@@ -157,10 +155,10 @@ class ConcatGradXPUKernel : public framework::OpKernel<T> {
             axis, out_grad->dims().size()));
     auto input_dims = ins[0]->dims();
-    std::vector<int> split_list(n);
+    std::vector<int> split_list(ins.size());
     std::vector<int> xdims_list(input_dims.size());
     int total_length = 0;
-    for (int i = 0; i < n; ++i) {
+    for (size_t i = 0; i < ins.size(); ++i) {
       split_list[i] = ins[i]->dims()[axis];
       total_length += ins[i]->dims()[axis];
     }
@@ -172,11 +170,6 @@ class ConcatGradXPUKernel : public framework::OpKernel<T> {
     }
     xdims_list[axis] = total_length;

-    std::vector<T*> ptrs(n);
-    for (int i = 0; i < n; ++i) {
-      ptrs[i] = outputs[i]->data<T>();
-    }
-
     auto& dev_ctx = ctx.template device_context<DeviceContext>();
     int r = xpu::split<T>(dev_ctx.x_context(), out_grad->data<T>(), ptrs,
                           xdims_list, split_list, axis);
--
GitLab