Unverified commit 4c375454, authored by huangjiyi, committed by GitHub

[PHI Decoupling] remove dependency on "paddle/fluid/platform/errors.h" and "paddle/fluid/platform/fast_divmod.h" in phi. (#47815)

* rm "paddle/fluid/platform/errors.h" in phi

* rm "paddle/fluid/platform/fast_divmod.h" in phi
Parent 3a14857b
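
The substitution is mechanical across phi, and every hunk below follows the same pattern (condensed here, with message text elided):

-#include "paddle/fluid/platform/errors.h"
+#include "paddle/phi/core/errors.h"
-  paddle::platform::errors::NotFound(...)
+  phi::errors::NotFound(...)

-#include "paddle/fluid/platform/fast_divmod.h"
+#include "paddle/phi/kernels/primitive/datamover_primitives.h"
-using paddle::platform::FastDivMod;
+using phi::kps::details::FastDivMod;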
@@ -40,10 +40,10 @@ void* GetDsoHandle(const std::string& dso_name) {
   void* dso_handle = dlopen(dso_name.c_str(), dynload_flags);
-  PADDLE_ENFORCE_NOT_NULL(dso_handle,
-                          paddle::platform::errors::NotFound(
-                              "TensorRT is needed, "
-                              "but TensorRT dynamic library is not found."));
+  PADDLE_ENFORCE_NOT_NULL(
+      dso_handle,
+      phi::errors::NotFound("TensorRT is needed, "
+                            "but TensorRT dynamic library is not found."));
   return dso_handle;
 }
@@ -174,10 +174,10 @@ void SelectedRowsImpl::Get(const phi::DenseTensor& ids,
                            phi::DenseTensor* value,
                            bool auto_grown,
                            bool is_test) {
-  PADDLE_ENFORCE_EQ(value->IsInitialized(),
-                    true,
-                    paddle::platform::errors::InvalidArgument(
-                        "The value tensor is not initialized."));
+  PADDLE_ENFORCE_EQ(
+      value->IsInitialized(),
+      true,
+      phi::errors::InvalidArgument("The value tensor is not initialized."));
   if (ids.numel() == 0) {
     VLOG(3) << "keys is empty, please check data!";
   } else {
@@ -70,7 +70,7 @@ class AutoTuneBase {
     PADDLE_ENFORCE_GT(
         kernels_.size(),
         0,
-        paddle::platform::errors::InvalidArgument(
+        phi::errors::InvalidArgument(
             "kernel num must be greater than 0, now is %d", kernels_.size()));
     is_init_ = true;
@@ -102,7 +102,7 @@ class AutoTuneBase {
     PADDLE_ENFORCE_GT(
         kernels_.size(),
         0,
-        paddle::platform::errors::InvalidArgument(
+        phi::errors::InvalidArgument(
            "kernel num must be greater than 0, now is %d", kernels_.size()));
     size_t best_idx = 0;
     float min_time = std::numeric_limits<float>::max();
@@ -15,7 +15,7 @@
 #pragma once

 #include "paddle/fluid/platform/enforce.h"
-#include "paddle/fluid/platform/errors.h"
+#include "paddle/phi/core/errors.h"

 namespace phi {
 namespace funcs {
@@ -20,7 +20,7 @@
 #include "paddle/phi/kernels/funcs/eigen/common.h"

 #if defined(__NVCC__) || defined(__HIPCC__)
-#include "paddle/fluid/platform/fast_divmod.h"
+#include "paddle/phi/kernels/primitive/datamover_primitives.h"
 #endif

 namespace phi {
@@ -142,7 +142,7 @@ inline std::vector<T> get_new_data_from_tensor(
 }

 #if defined(__NVCC__) || defined(__HIPCC__)
-using paddle::platform::FastDivMod;
+using phi::kps::details::FastDivMod;

 struct FastDivModForInterpolate {
  public:
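
Background on the type being renamed: FastDivMod precomputes a magic multiplier and shift for a divisor that is fixed at construction time, so GPU indexing loops can replace hardware integer '/' and '%' with a multiply-high plus a shift. Below is a minimal host-side sketch of that technique, for illustration only; it is not the phi::kps::details implementation, and it assumes divisors in [1, 2^31].

#include <cstdint>
#include <cstdio>

// Illustrative FastDivMod: division by a divisor fixed at construction
// becomes multiply-high + shift + subtract, far cheaper than hardware
// '/' and '%' inside a hot indexing loop.
struct FastDivModSketch {
  uint32_t divisor;
  uint32_t shift;
  uint32_t multiplier;

  explicit FastDivModSketch(uint32_t d) : divisor(d) {
    shift = 0;
    while ((1u << shift) < d) ++shift;  // smallest shift with 2^shift >= d
    uint64_t one = 1;
    // multiplier = floor(2^32 * (2^shift - d) / d) + 1
    multiplier =
        static_cast<uint32_t>(((one << 32) * ((one << shift) - d)) / d + 1);
  }

  uint32_t Div(uint32_t n) const {
    // High 32 bits of n * multiplier, then fold n back in and shift.
    uint32_t hi =
        static_cast<uint32_t>((static_cast<uint64_t>(n) * multiplier) >> 32);
    return static_cast<uint32_t>((static_cast<uint64_t>(hi) + n) >> shift);
  }

  uint32_t Mod(uint32_t n) const { return n - Div(n) * divisor; }
};

int main() {
  FastDivModSketch dm(7);
  for (uint32_t n : {0u, 6u, 7u, 13u, 100u}) {
    std::printf("%u = 7 * %u + %u\n", n, dm.Div(n), dm.Mod(n));
  }
  return 0;
}

The pooling and interpolation hunks below only change which namespace provides this type; the arithmetic itself is untouched.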
@@ -16,56 +16,56 @@ limitations under the License. */
 #include <vector>

 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
-#include "paddle/fluid/platform/fast_divmod.h"
 #include "paddle/phi/backends/gpu/gpu_launch_config.h"
 #include "paddle/phi/kernels/funcs/pooling.h"
 #include "paddle/phi/kernels/funcs/reduce_function.h"
+#include "paddle/phi/kernels/primitive/datamover_primitives.h"

 namespace phi {
 namespace funcs {

 struct FastDivModForPooling {
  public:
-  paddle::platform::FastDivMod channel;
-  paddle::platform::FastDivMod width;
-  paddle::platform::FastDivMod height;
+  phi::kps::details::FastDivMod channel;
+  phi::kps::details::FastDivMod width;
+  phi::kps::details::FastDivMod height;

   explicit HOSTDEVICE FastDivModForPooling(const int channels,
                                            const int output_width,
                                            const int output_height) {
-    channel = paddle::platform::FastDivMod(channels);
-    width = paddle::platform::FastDivMod(output_width);
-    height = paddle::platform::FastDivMod(output_height);
+    channel = phi::kps::details::FastDivMod(channels);
+    width = phi::kps::details::FastDivMod(output_width);
+    height = phi::kps::details::FastDivMod(output_height);
   }
 };

 struct FastDivModForPooling3D {
  public:
-  paddle::platform::FastDivMod channel;
-  paddle::platform::FastDivMod width;
-  paddle::platform::FastDivMod height;
-  paddle::platform::FastDivMod depth;
+  phi::kps::details::FastDivMod channel;
+  phi::kps::details::FastDivMod width;
+  phi::kps::details::FastDivMod height;
+  phi::kps::details::FastDivMod depth;

   explicit HOSTDEVICE FastDivModForPooling3D(const int channels,
                                              const int output_width,
                                              const int output_height,
                                              const int output_depth) {
-    channel = paddle::platform::FastDivMod(channels);
-    width = paddle::platform::FastDivMod(output_width);
-    height = paddle::platform::FastDivMod(output_height);
-    depth = paddle::platform::FastDivMod(output_depth);
+    channel = phi::kps::details::FastDivMod(channels);
+    width = phi::kps::details::FastDivMod(output_width);
+    height = phi::kps::details::FastDivMod(output_height);
+    depth = phi::kps::details::FastDivMod(output_depth);
   }
 };

 struct FastDivModForPoolingWithMoreStaff {
  public:
-  paddle::platform::FastDivMod channel;
-  paddle::platform::FastDivMod width;
-  paddle::platform::FastDivMod height;
-  paddle::platform::FastDivMod ksize_w;
-  paddle::platform::FastDivMod ksize_h;
-  paddle::platform::FastDivMod stride_w;
-  paddle::platform::FastDivMod stride_h;
+  phi::kps::details::FastDivMod channel;
+  phi::kps::details::FastDivMod width;
+  phi::kps::details::FastDivMod height;
+  phi::kps::details::FastDivMod ksize_w;
+  phi::kps::details::FastDivMod ksize_h;
+  phi::kps::details::FastDivMod stride_w;
+  phi::kps::details::FastDivMod stride_h;

   explicit HOSTDEVICE FastDivModForPoolingWithMoreStaff(
       const int channels,
@@ -75,13 +75,13 @@ struct FastDivModForPoolingWithMoreStaff {
       const int ksize_height,
       const int stride_width,
       const int stride_height) {
-    channel = paddle::platform::FastDivMod(channels);
-    width = paddle::platform::FastDivMod(input_width);
-    height = paddle::platform::FastDivMod(input_height);
-    ksize_w = paddle::platform::FastDivMod(ksize_width);
-    ksize_h = paddle::platform::FastDivMod(ksize_height);
-    stride_w = paddle::platform::FastDivMod(stride_width);
-    stride_h = paddle::platform::FastDivMod(stride_height);
+    channel = phi::kps::details::FastDivMod(channels);
+    width = phi::kps::details::FastDivMod(input_width);
+    height = phi::kps::details::FastDivMod(input_height);
+    ksize_w = phi::kps::details::FastDivMod(ksize_width);
+    ksize_h = phi::kps::details::FastDivMod(ksize_height);
+    stride_w = phi::kps::details::FastDivMod(stride_width);
+    stride_h = phi::kps::details::FastDivMod(stride_height);
   }
 };
@@ -15,7 +15,6 @@
 #include "paddle/phi/kernels/interpolate_grad_kernel.h"

 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
-#include "paddle/fluid/platform/fast_divmod.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/backends/gpu/gpu_launch_config.h"
 #include "paddle/phi/common/amp_type_traits.h"
@@ -24,6 +23,7 @@
 #include "paddle/phi/kernels/funcs/interpolate_function.h"
 #include "paddle/phi/kernels/funcs/math_cuda_utils.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
+#include "paddle/phi/kernels/primitive/datamover_primitives.h"

 namespace phi {
@@ -16,7 +16,6 @@
 #include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
 #include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
-#include "paddle/fluid/platform/fast_divmod.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/backends/gpu/gpu_launch_config.h"
 #include "paddle/phi/common/amp_type_traits.h"
@@ -24,9 +23,10 @@
 #include "paddle/phi/common/layout.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/funcs/interpolate_function.h"
+#include "paddle/phi/kernels/primitive/datamover_primitives.h"

 namespace phi {
-using paddle::platform::FastDivMod;
+using phi::kps::details::FastDivMod;

 template <typename T>
 __forceinline__ __device__ void PreCalculatorForLinearInterpInputIndex(
@@ -108,7 +108,7 @@ void BroadcastTensorsKernel(const Context& ctx,
     SWITCH_OUT_RANK_CASE(5)
     SWITCH_OUT_RANK_CASE(6)
     default: {
-      PADDLE_THROW(paddle::platform::errors::InvalidArgument(
+      PADDLE_THROW(phi::errors::InvalidArgument(
           "Target tensor rank out of range"
           "Maximum supported rank for broadcast is: 6"));
     }
@@ -129,7 +129,7 @@ static void ScaleCPU(DataType kernel_dtype,
       break;
     }
     default: {
-      PADDLE_THROW(paddle::platform::errors::Fatal(
+      PADDLE_THROW(phi::errors::Fatal(
          "Detected unsupported data type."
          "Only Float64, Float32, BFloat16, Int64, Int32, Int16, Int8, UInt8 "
          "are supported for now."));
@@ -188,7 +188,7 @@ static void ScaleGPU(DataType kernel_dtype,
       break;
     }
     default: {
-      PADDLE_THROW(paddle::platform::errors::Fatal(
+      PADDLE_THROW(phi::errors::Fatal(
          "Detected unsupported data type."
          "Only Float64, Float32, Float16, Int64, Int32, Int16, Int8, UInt8 "
          "are "
@@ -262,7 +262,7 @@ Tensor scale_switch_case(const Tensor& x,
       break;
 #endif
     default:
-      PADDLE_THROW(paddle::platform::errors::Fatal(
+      PADDLE_THROW(phi::errors::Fatal(
          "Detected unsupported backend."
          "Only CPU and CUDA Backend are supported for now."
          "Please double check if your backend falls into the above two "
@@ -275,12 +275,12 @@ TEST(math_funciton, set_constant) {
   auto* ctx = new phi::CPUContext();
   phi::funcs::set_constant(*ctx, &t, 10);
   for (int64_t i = 0; i < t.numel(); ++i) {
-    PADDLE_ENFORCE_EQ(10,
-                      t.data<int>()[i],
-                      paddle::platform::errors::InvalidArgument(
-                          "Each value of input tensor should be 10, "
-                          "but received %d.",
-                          t.data<int>()[i]));
+    PADDLE_ENFORCE_EQ(
+        10,
+        t.data<int>()[i],
+        phi::errors::InvalidArgument("Each value of input tensor should be 10, "
+                                     "but received %d.",
+                                     t.data<int>()[i]));
   }
   delete ctx;
 }
@@ -26,7 +26,7 @@ void fill_fp16_data(phi::dtype::float16* in_ptr,
   PADDLE_ENFORCE_EQ(
       size,
       data.size(),
-      paddle::platform::errors::InvalidArgument(
+      phi::errors::InvalidArgument(
          "The size of argument data should"
          " be equal to the argument size. Expected %d, but received %d.",
          size,
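
After the migration, call sites keep the same shape: an enforce macro paired with a phi::errors factory (NotFound, InvalidArgument, Fatal, and so on). A minimal sketch of a post-migration call site, using a hypothetical CheckBroadcastRank helper (not part of this commit) modeled on the broadcast hunk above:

#include "paddle/fluid/platform/enforce.h"  // PADDLE_ENFORCE_* macros, as in the header hunk above
#include "paddle/phi/core/errors.h"         // phi::errors::* factories

// Hypothetical helper: rejects ranks the broadcast kernel cannot handle.
void CheckBroadcastRank(int rank) {
  // Throws phi::errors::InvalidArgument when rank > 6.
  PADDLE_ENFORCE_LE(
      rank,
      6,
      phi::errors::InvalidArgument(
          "Maximum supported rank for broadcast is 6, but received %d.",
          rank));
}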