diff --git a/paddle/fluid/framework/details/reduce_op_handle_test.cc b/paddle/fluid/framework/details/reduce_op_handle_test.cc
index ad7888c065402ccc1d6dd03b53c34318f77bc03d..7f38629f4e6065d59d4bb700ddf923da57634272 100644
--- a/paddle/fluid/framework/details/reduce_op_handle_test.cc
+++ b/paddle/fluid/framework/details/reduce_op_handle_test.cc
@@ -69,7 +69,7 @@ struct TestReduceOpHandle {
       for (int i = 0; i < count; ++i) {
         auto p = p::CUDAPlace(i);
         gpu_list_.push_back(p);
-        ctxs_.emplace_back(new p::phi::GPUContext(p));
+        ctxs_.emplace_back(new phi::GPUContext(p));
       }
       nccl_ctxs_.reset(new platform::NCCLContextMap(gpu_list_));
 #else
diff --git a/paddle/fluid/operators/dropout_op_test.cc b/paddle/fluid/operators/dropout_op_test.cc
index d51c57d6eab8d0fa12a7e2e2b4c2edf1e42d6828..f63e78587471118378497ff6ecdd913364647500 100644
--- a/paddle/fluid/operators/dropout_op_test.cc
+++ b/paddle/fluid/operators/dropout_op_test.cc
@@ -98,7 +98,7 @@ TEST(Dropout, CPUDense) {
 TEST(Dropout, GPUDense) {
   f::Scope scope;
   p::CUDAPlace place;
-  p::phi::GPUContext ctx(place);
+  phi::GPUContext ctx(place);
   Compare(scope, ctx);
 }
 */
diff --git a/paddle/fluid/operators/sparse_attention_op.cu b/paddle/fluid/operators/sparse_attention_op.cu
index 8bf431e59f017ca9f8c5ef2f426b3ef3bc4779d7..95c0562eb7272012798b8fb3ef8c76dea2c6d41d 100644
--- a/paddle/fluid/operators/sparse_attention_op.cu
+++ b/paddle/fluid/operators/sparse_attention_op.cu
@@ -881,10 +881,10 @@ class SparseAttentionGradCUDAKernel : public framework::OpKernel {
 }  // namespace paddle
 REGISTER_OP_CUDA_KERNEL(
     sparse_attention,
-    ops::SparseAttentionCUDAKernel,
-    ops::SparseAttentionCUDAKernel);
+    ops::SparseAttentionCUDAKernel,
+    ops::SparseAttentionCUDAKernel);
 
 REGISTER_OP_CUDA_KERNEL(
     sparse_attention_grad,
-    ops::SparseAttentionGradCUDAKernel,
-    ops::SparseAttentionGradCUDAKernel);
+    ops::SparseAttentionGradCUDAKernel,
+    ops::SparseAttentionGradCUDAKernel);
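
Note: a minimal sketch of the construction pattern these hunks standardize on, i.e. spelling the device context as phi::GPUContext and building it directly from a GPU place. The include paths and the MakeGpuContext helper are assumptions for illustration, not code from this diff.

// Sketch only: header locations are assumed, not taken from this PR.
#include <memory>

#include "paddle/phi/backends/gpu/gpu_context.h"  // assumed header for phi::GPUContext
#include "paddle/phi/common/place.h"               // assumed header for phi::GPUPlace

// Hypothetical helper mirroring the updated test code: the context is
// constructed from a GPU place for the given device id, with no
// p:: / platform:: qualification on the context type.
std::unique_ptr<phi::GPUContext> MakeGpuContext(int device_id) {
  phi::GPUPlace place(device_id);
  return std::make_unique<phi::GPUContext>(place);
}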