diff --git a/cmake/external/xpu.cmake b/cmake/external/xpu.cmake index 32a6ec18714a4ac6426066753a6272b53ba37abf..677fed84dcb4c3360b00db440a0c8c6e0d484d65 100644 --- a/cmake/external/xpu.cmake +++ b/cmake/external/xpu.cmake @@ -10,7 +10,7 @@ set(XPU_RT_LIB_NAME "libxpurt.so") if(NOT DEFINED XPU_BASE_URL) set(XPU_BASE_URL_WITHOUT_DATE "https://baidu-kunlun-product.cdn.bcebos.com/KL-SDK/klsdk-dev") - set(XPU_BASE_URL "${XPU_BASE_URL_WITHOUT_DATE}/20220831") + set(XPU_BASE_URL "${XPU_BASE_URL_WITHOUT_DATE}/20220907") else() set(XPU_BASE_URL "${XPU_BASE_URL}") endif() @@ -19,7 +19,7 @@ endif() if(NOT DEFINED XPU_XDNN_BASE_URL) set(XPU_XDNN_BASE_URL_WITHOUT_DATE "https://klx-sdk-release-public.su.bcebos.com/xdnn/dev") - set(XPU_XDNN_BASE_URL "${XPU_XDNN_BASE_URL_WITHOUT_DATE}/20220831") + set(XPU_XDNN_BASE_URL "${XPU_XDNN_BASE_URL_WITHOUT_DATE}/20220907") else() set(XPU_XDNN_BASE_URL "${XPU_XDNN_BASE_URL}") endif() diff --git a/paddle/phi/kernels/xpu/rnn_grad_kernel.cc b/paddle/phi/kernels/xpu/rnn_grad_kernel.cc index bc68fa6f15d2e5be1964b080ad6a5571e8decb6e..c7ba5e7f765f05328bd963ca17b3ae5df48aaf66 100644 --- a/paddle/phi/kernels/xpu/rnn_grad_kernel.cc +++ b/paddle/phi/kernels/xpu/rnn_grad_kernel.cc @@ -92,14 +92,14 @@ void RnnGradKernel(const Context& dev_ctx, std::vector> parameter_lists; parameter_lists.resize(num_layers); - reset_parameter_vector(weight_list, num_layers, is_bidirec, ¶meter_lists); + ResetParameterVector(weight_list, num_layers, is_bidirec, ¶meter_lists); for (unsigned int i = 0; i < weight_grad_list.size(); ++i) { dev_ctx.template Alloc(weight_grad_list[i]); } std::vector> parameter_lists_grad; parameter_lists_grad.resize(num_layers); - reset_parameter_vector( + ResetParameterVector( weight_grad_list, num_layers, is_bidirec, ¶meter_lists_grad); // allocate the memory and initization the x_grad diff --git a/paddle/phi/kernels/xpu/rnn_kernel.cc b/paddle/phi/kernels/xpu/rnn_kernel.cc index 
6d9234b38f16f5ad550a39bfe02c3f7d865fcda5..9465839043a6ee3a289f0522e38c5fcb1160387a 100644 --- a/paddle/phi/kernels/xpu/rnn_kernel.cc +++ b/paddle/phi/kernels/xpu/rnn_kernel.cc @@ -89,7 +89,7 @@ void RnnKernel(const Context& dev_ctx, // weightlist std::vector> parameter_lists; parameter_lists.resize(num_layers); - reset_parameter_vector(weight_list, num_layers, is_bidirec, &parameter_lists); + ResetParameterVector(weight_list, num_layers, is_bidirec, &parameter_lists); // init the output and allocate the memory dev_ctx.template Alloc(out); diff --git a/paddle/phi/kernels/xpu/rnn_util.h b/paddle/phi/kernels/xpu/rnn_util.h index c42cb1309d11336cd2cac6f70d713a980271779b..5310b35e64dc36177ce7028bfeba57b35fe1879f 100644 --- a/paddle/phi/kernels/xpu/rnn_util.h +++ b/paddle/phi/kernels/xpu/rnn_util.h @@ -19,10 +19,10 @@ namespace phi { template -void reset_parameter_vector(const std::vector& raw_params_vec, - const int& num_layers, - const bool& is_bidirec, - std::vector>* params_vec) { +void ResetParameterVector(const std::vector& raw_params_vec, + const int& num_layers, + const bool& is_bidirec, + std::vector>* params_vec) { // the parameter raw seuquence is [FWhi, FWhh, BWhi, BWhh] * num_layers // + [FBhi, FBhh, BBhi, BBhh] * num_layers, we will reset the parameter to // ([FWhi, FWhh, FBhi, FBhh] + [BWhi, BWhh, BBhi, BBhh]) * num_layers diff --git a/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py b/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py index 2258d354f96f38025dd98e182498ec777486ff0c..16c309e0c1483b42ef32445299f4ba7920a7a212 100644 --- a/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py +++ b/python/paddle/fluid/tests/unittests/xpu/get_test_cover_info.py @@ -91,7 +91,10 @@ xpu_test_op_type_white_list = [ "lamb_float16", "lars_momentum_float32", "resnet_unit", - "resnet_unit_grad" + "resnet_unit_grad", + "c_embedding_float32", # unittests of collective ops do not use the xpu testing framework + "c_sync_comm_stream_float32", + 
"c_sync_calc_stream_float32", ] xpu_test_device_op_white_list = [] xpu_test_device_op_type_white_list = []