From 1e981d0d401dbc0df4d93a51b78c0ea80d7babcd Mon Sep 17 00:00:00 2001
From: houj04 <35131887+houj04@users.noreply.github.com>
Date: Wed, 7 Sep 2022 18:48:15 +0800
Subject: [PATCH] [XPU] update xdnn to 0907. (#45777)

* [XPU] update xdnn to 0906. test=kunlun

* [XPU] update xdnn to 0907. test=kunlun
---
 cmake/external/xpu.cmake                             | 4 ++--
 paddle/phi/kernels/xpu/rnn_grad_kernel.cc            | 4 ++--
 paddle/phi/kernels/xpu/rnn_kernel.cc                 | 2 +-
 paddle/phi/kernels/xpu/rnn_util.h                    | 8 ++++----
 .../fluid/tests/unittests/xpu/get_test_cover_info.py | 5 ++++-
 5 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/cmake/external/xpu.cmake b/cmake/external/xpu.cmake
index 32a6ec18714..677fed84dcb 100644
--- a/cmake/external/xpu.cmake
+++ b/cmake/external/xpu.cmake
@@ -10,7 +10,7 @@ set(XPU_RT_LIB_NAME "libxpurt.so")
 if(NOT DEFINED XPU_BASE_URL)
   set(XPU_BASE_URL_WITHOUT_DATE
       "https://baidu-kunlun-product.cdn.bcebos.com/KL-SDK/klsdk-dev")
-  set(XPU_BASE_URL "${XPU_BASE_URL_WITHOUT_DATE}/20220831")
+  set(XPU_BASE_URL "${XPU_BASE_URL_WITHOUT_DATE}/20220907")
 else()
   set(XPU_BASE_URL "${XPU_BASE_URL}")
 endif()
@@ -19,7 +19,7 @@ endif()
 if(NOT DEFINED XPU_XDNN_BASE_URL)
   set(XPU_XDNN_BASE_URL_WITHOUT_DATE
       "https://klx-sdk-release-public.su.bcebos.com/xdnn/dev")
-  set(XPU_XDNN_BASE_URL "${XPU_XDNN_BASE_URL_WITHOUT_DATE}/20220831")
+  set(XPU_XDNN_BASE_URL "${XPU_XDNN_BASE_URL_WITHOUT_DATE}/20220907")
 else()
   set(XPU_XDNN_BASE_URL "${XPU_XDNN_BASE_URL}")
 endif()
diff --git a/paddle/phi/kernels/xpu/rnn_grad_kernel.cc b/paddle/phi/kernels/xpu/rnn_grad_kernel.cc
index bc68fa6f15d..c7ba5e7f765 100644
--- a/paddle/phi/kernels/xpu/rnn_grad_kernel.cc
+++ b/paddle/phi/kernels/xpu/rnn_grad_kernel.cc
@@ -92,14 +92,14 @@ void RnnGradKernel(const Context& dev_ctx,
 
   std::vector<std::vector<const T*>> parameter_lists;
   parameter_lists.resize(num_layers);
-  reset_parameter_vector(weight_list, num_layers, is_bidirec, &parameter_lists);
+  ResetParameterVector(weight_list, num_layers, is_bidirec, &parameter_lists);
 
   for (unsigned int i = 0; i < weight_grad_list.size(); ++i) {
     dev_ctx.template Alloc<T>(weight_grad_list[i]);
   }
   std::vector<std::vector<T*>> parameter_lists_grad;
   parameter_lists_grad.resize(num_layers);
-  reset_parameter_vector(
+  ResetParameterVector(
       weight_grad_list, num_layers, is_bidirec, &parameter_lists_grad);
 
   // allocate the memory and initization the x_grad
diff --git a/paddle/phi/kernels/xpu/rnn_kernel.cc b/paddle/phi/kernels/xpu/rnn_kernel.cc
index 6d9234b38f1..9465839043a 100644
--- a/paddle/phi/kernels/xpu/rnn_kernel.cc
+++ b/paddle/phi/kernels/xpu/rnn_kernel.cc
@@ -89,7 +89,7 @@ void RnnKernel(const Context& dev_ctx,
   // weightlist
   std::vector<std::vector<const T*>> parameter_lists;
   parameter_lists.resize(num_layers);
-  reset_parameter_vector(weight_list, num_layers, is_bidirec, &parameter_lists);
+  ResetParameterVector(weight_list, num_layers, is_bidirec, &parameter_lists);
 
   // init the output and allocate the memory
   dev_ctx.template Alloc<T>(out);
diff --git a/paddle/phi/kernels/xpu/rnn_util.h b/paddle/phi/kernels/xpu/rnn_util.h
index c42cb1309d1..5310b35e64d 100644
--- a/paddle/phi/kernels/xpu/rnn_util.h
+++ b/paddle/phi/kernels/xpu/rnn_util.h
@@ -19,10 +19,10 @@
 namespace phi {
 
 template <typename TensorType, typename T>
-void reset_parameter_vector(const std::vector<TensorType>& raw_params_vec,
-                            const int& num_layers,
-                            const bool& is_bidirec,
-                            std::vector<std::vector<T*>>* params_vec) {
+void ResetParameterVector(const std::vector<TensorType>& raw_params_vec,
+                          const int& num_layers,
+                          const bool& is_bidirec,
+                          std::vector<std::vector<T*>>* params_vec) {
   // the parameter raw seuquence is [FWhi, FWhh, BWhi, BWhh] * num_layers
   // + [FBhi, FBhh, BBhi, BBhh] * num_layers, we will reset the parameter to
   // ([FWhi, FWhh, FBhi, FBhh] + [BWhi, BWhh, BBhi, BBhh]) * num_layers
diff --git a/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py b/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py
index 2258d354f96..16c309e0c14 100644
--- a/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py
+++ b/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py
@@ -91,7 +91,10 @@ xpu_test_op_type_white_list = [
     "lamb_float16",
     "lars_momentum_float32",
     "resnet_unit",
-    "resnet_unit_grad"
+    "resnet_unit_grad",
+    "c_embedding_float32",  # unittests of collective ops do not using xpu testing framework
+    "c_sync_comm_stream_float32",
+    "c_sync_calc_stream_float32",
 ]
 xpu_test_device_op_white_list = []
 xpu_test_device_op_type_white_list = []
-- 
GitLab
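A note on the layout change that ResetParameterVector performs: the comment in
rnn_util.h describes regrouping the raw parameter sequence so that each layer's
weights and biases sit together, direction by direction. The sketch below is a
minimal, self-contained illustration of that index mapping only; the helper
name RegroupRnnParams and the use of strings in place of tensor pointers are
hypothetical, not the actual Paddle implementation.

// Sketch of the reordering described in rnn_util.h (assumption: all weights
// come first in `raw`, then all biases, exactly as the comment states).
#include <iostream>
#include <string>
#include <vector>

std::vector<std::vector<std::string>> RegroupRnnParams(
    const std::vector<std::string>& raw, int num_layers, bool is_bidirec) {
  const int direction_num = is_bidirec ? 2 : 1;
  // Two weight entries (Whi, Whh) per direction per layer in the first half,
  // two bias entries (Bhi, Bhh) per direction per layer in the second half.
  const int bias_offset = num_layers * direction_num * 2;
  std::vector<std::vector<std::string>> out(num_layers);
  for (int layer = 0; layer < num_layers; ++layer) {
    for (int dir = 0; dir < direction_num; ++dir) {
      const int w = (layer * direction_num + dir) * 2;
      out[layer].push_back(raw[w]);                    // Whi
      out[layer].push_back(raw[w + 1]);                // Whh
      out[layer].push_back(raw[bias_offset + w]);      // Bhi
      out[layer].push_back(raw[bias_offset + w + 1]);  // Bhh
    }
  }
  return out;
}

int main() {
  // One bidirectional layer: [FWhi, FWhh, BWhi, BWhh] + [FBhi, FBhh, BBhi, BBhh]
  std::vector<std::string> raw = {"FWhi", "FWhh", "BWhi", "BWhh",
                                  "FBhi", "FBhh", "BBhi", "BBhh"};
  for (const auto& layer : RegroupRnnParams(raw, /*num_layers=*/1, true)) {
    for (const auto& p : layer) std::cout << p << ' ';
    std::cout << '\n';  // prints: FWhi FWhh FBhi FBhh BWhi BWhh BBhi BBhh
  }
  return 0;
}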