Unverified commit ca2ea162, authored by Galaxy1458 and committed by GitHub

remove some [-Wunused-parameter] warnings (#53679)

* test,test=develop
Parent 96188fc1
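The change itself is mechanical: every parameter a function accepts but never reads is annotated with UNUSED, which silences -Wunused-parameter without changing any signature or call site. A minimal sketch of the pattern, assuming a GCC/Clang-style macro (Paddle defines UNUSED in its own headers; the definition below is only illustrative):

#include <string>

// Assumed definition: expands to the unused attribute on GCC/Clang and to
// nothing elsewhere, so annotated code stays portable.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// Annotating the parameter keeps the signature and default argument intact
// for callers while telling the compiler the non-use is deliberate.
std::string Describe(const std::string& name,
                     int max_num_of_data UNUSED = 15) {
  return "Tensor [" + name + "]";  // max_num_of_data intentionally unread
}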
@@ -339,7 +339,7 @@ static bool CompareTensor(const PaddleTensor &a, const PaddleTensor &b) {
 }
 
 static std::string DescribeTensor(const PaddleTensor &tensor,
-                                  int max_num_of_data = 15) {
+                                  int max_num_of_data UNUSED = 15) {
   std::stringstream os;
   os << "Tensor [" << tensor.name << "]\n";
   os << " - type: ";
...
@@ -60,7 +60,7 @@ class AssignFunctor {
   }
 
   template <typename T>
-  void operator()(const T &v) const {
+  void operator()(const T &v UNUSED) const {
     PADDLE_ENFORCE_EQ(
         true,
         false,
...
@@ -1433,8 +1433,7 @@ struct KernelRegistrar {
     reg_type, kernel_name, layout, meta_kernel_fn, N, ...)                \
   static void __PD_KERNEL_args_def_FN_##kernel_name##_##layout(           \
       const ::phi::KernelKey& kernel_key, ::phi::Kernel* kernel);         \
-  PD_EXPAND(                                                              \
-      PD_CONCATENATE(_PD_FOR_ALL_BACKEND_DTYPE_, N)(                      \
-          reg_type,                                                       \
-          kernel_name,                                                    \
-          layout,                                                         \
+  PD_EXPAND(PD_CONCATENATE(_PD_FOR_ALL_BACKEND_DTYPE_, N)(                \
+      reg_type,                                                           \
+      kernel_name,                                                        \
+      layout,                                                             \
@@ -1442,7 +1441,8 @@ struct KernelRegistrar {
       __PD_KERNEL_args_def_FN_##kernel_name##_##layout,                   \
       __VA_ARGS__) void                                                   \
       __PD_KERNEL_args_def_FN_##kernel_name##_##layout(                   \
-          const ::phi::KernelKey& kernel_key, ::phi::Kernel* kernel))
+          const ::phi::KernelKey& kernel_key UNUSED,                      \
+          ::phi::Kernel* kernel UNUSED))
 #ifndef _WIN32
 #define ___PD_REGISTER_KERNEL_FOR_ALL_BACKEND_DTYPE(                      \
     reg_type, kernel_name, backend, layout, kernel_fn, args_def_fn)       \
...
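The registrar hunk above is the trickier case: the parameters belong to a function definition that a macro stamps out, so the annotation has to be written inside the macro itself — some expansions use the parameters to register attributes, others never touch them. A rough self-contained sketch of that situation (the macro and type names here are hypothetical, not Paddle's real registry API):

#define UNUSED __attribute__((unused))  // assumed GCC/Clang-style definition

struct KernelKey {};
struct Kernel {};

// Hypothetical registration helper mirroring the shape of the macro above:
// the generated args-def function must keep both parameters even when a
// given kernel configures nothing, so UNUSED goes inside the macro body.
#define DEFINE_ARGS_DEF(name)                               \
  static void name##_args_def(const KernelKey& key UNUSED,  \
                              Kernel* kernel UNUSED) {      \
    /* this kernel registers no extra argument defs */      \
  }

DEFINE_ARGS_DEF(add)  // expands to add_args_def with both parameters unused

int main() {
  add_args_def(KernelKey{}, nullptr);  // call the generated definition
  return 0;
}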
@@ -28,7 +28,7 @@ template <typename T, typename Context>
 void CheckMemoryContinueKernel(const Context &dev_ctx,
                                const std::vector<const DenseTensor *> &input,
                                DenseTensor *output,
-                               std::vector<DenseTensor *> xout) {
+                               std::vector<DenseTensor *> xout UNUSED) {
   int64_t size_of_dtype = sizeof(T);
   auto dtype = input.at(0)->dtype();
   int64_t numel = 0;
...
@@ -52,7 +52,7 @@ template <typename T, typename Context, typename Reducer>
 void ScanKernel(const Context& dev_ctx,
                 const DenseTensor& x,
                 int axis,
-                bool flatten,
+                bool flatten UNUSED,
                 bool exclusive,
                 bool reverse,
                 Reducer reducer,
...
@@ -22,7 +22,7 @@ namespace phi {
 
 template <typename T, typename Context>
 void DiagonalGradKernel(const Context& dev_ctx,
-                        const DenseTensor& x,
+                        const DenseTensor& x UNUSED,
                         const DenseTensor& out_grad,
                         int offset,
                         int axis1,
...
@@ -75,7 +75,9 @@ void ElemwiseExplicitGradCompute(const CPUContext& dev_ctx,
  */
 template <typename T>
 struct IdentityGrad {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return dout; }
+  HOSTDEVICE T operator()(T x UNUSED, T y UNUSED, T out UNUSED, T dout) const {
+    return dout;
+  }
 };
 
 template <typename T>
@@ -120,12 +122,16 @@ ElementwiseAddGrad(const CPUContext& ctx,
 
 template <typename T>
 struct SubGradDX {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return dout; }
+  HOSTDEVICE T operator()(T x UNUSED, T y UNUSED, T out UNUSED, T dout) const {
+    return dout;
+  }
 };
 
 template <typename T>
 struct SubGradDY {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return -dout; }
+  HOSTDEVICE T operator()(T x UNUSED, T y UNUSED, T out UNUSED, T dout) const {
+    return -dout;
+  }
 };
 
 template <typename T>
...
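Note why these operands are unused at all: the grad functors deliberately keep the full (x, y, out, dout) call shape even when only dout matters, so a single generic elementwise-grad driver can invoke any of them through one interface; UNUSED keeps that uniformity warning-free. A sketch of the idea (the driver below is illustrative, not Paddle's actual ElemwiseGradCompute):

#include <cstddef>
#include <vector>

#define UNUSED __attribute__((unused))  // assumed definition, as sketched above

// Every grad functor exposes the same (x, y, out, dout) call shape.
template <typename T>
struct SubGradDY {
  T operator()(T x UNUSED, T y UNUSED, T out UNUSED, T dout) const {
    return -dout;
  }
};

// Illustrative generic driver: it stays this simple only because all
// functors share one signature, unused parameters included.
template <typename T, typename Functor>
std::vector<T> ElemwiseGrad(const std::vector<T>& x, const std::vector<T>& y,
                            const std::vector<T>& out,
                            const std::vector<T>& dout, Functor f) {
  std::vector<T> dres(dout.size());
  for (std::size_t i = 0; i < dout.size(); ++i) {
    dres[i] = f(x[i], y[i], out[i], dout[i]);
  }
  return dres;
}

With this shape, ElemwiseGrad(x, y, out, dout, SubGradDY<float>{}) works unchanged for any functor in the family.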
@@ -24,7 +24,7 @@ namespace phi {
 
 template <typename T, typename Context>
 void GatherGradKernel(const Context& dev_ctx,
-                      const DenseTensor& x,
+                      const DenseTensor& x UNUSED,
                       const DenseTensor& index,
                       const DenseTensor& out_grad,
                       const Scalar& axis,
...
@@ -21,7 +21,7 @@ namespace phi {
 
 template <typename T, typename Context>
 void OverlapAddGradKernel(const Context& dev_ctx,
-                          const DenseTensor& x,
+                          const DenseTensor& x UNUSED,
                           const DenseTensor& out_grad,
                           int hop_length,
                           int axis,
...
@@ -26,7 +26,7 @@ void ConstPad3DFuncNCDHW(const T* in_data,
                          const int in_depth,
                          const int in_height,
                          const int in_width,
-                         const int out_depth,
+                         const int out_depth UNUSED,
                          const int out_height,
                          const int out_width,
                          const int pad_front,
@@ -53,7 +53,7 @@ void ConstPad3DFuncNDHWC(const T* in_data,
                          const int in_depth,
                          const int in_height,
                          const int in_width,
-                         const int out_depth,
+                         const int out_depth UNUSED,
                          const int out_height,
                          const int out_width,
                          const int pad_front,
@@ -88,7 +88,7 @@ void ReflectPad3DFuncNCDHW(const T* in_data,
                            const int in_depth,
                            const int in_height,
                            const int in_width,
-                           const int out_depth,
+                           const int out_depth UNUSED,
                            const int out_height,
                            const int out_width,
                            const int pad_front,
@@ -97,7 +97,7 @@ void ReflectPad3DFuncNCDHW(const T* in_data,
                            const int out_d,
                            const int out_h,
                            const int out_w,
-                           const T value) {
+                           const T value UNUSED) {
   int in_d = out_d - pad_front;
   int in_h = out_h - pad_top;
   int in_w = out_w - pad_left;
@@ -120,7 +120,7 @@ void ReflectPad3DFuncNDHWC(const T* in_data,
                            const int in_depth,
                            const int in_height,
                            const int in_width,
-                           const int out_depth,
+                           const int out_depth UNUSED,
                            const int out_height,
                            const int out_width,
                            const int pad_front,
@@ -129,7 +129,7 @@ void ReflectPad3DFuncNDHWC(const T* in_data,
                            const int out_d,
                            const int out_h,
                            const int out_w,
-                           const T value) {
+                           const T value UNUSED) {
   int in_d = out_d - pad_front;
   int in_h = out_h - pad_top;
   int in_w = out_w - pad_left;
@@ -156,7 +156,7 @@ void ReplicatePad3DFuncNCDHW(const T* in_data,
                              const int in_depth,
                              const int in_height,
                              const int in_width,
-                             const int out_depth,
+                             const int out_depth UNUSED,
                              const int out_height,
                              const int out_width,
                              const int pad_front,
@@ -165,7 +165,7 @@ void ReplicatePad3DFuncNCDHW(const T* in_data,
                              const int out_d,
                              const int out_h,
                              const int out_w,
-                             const T value) {
+                             const T value UNUSED) {
   int in_d = std::min(in_depth - 1, std::max(out_d - pad_front, 0));
   int in_h = std::min(in_height - 1, std::max(out_h - pad_top, 0));
   int in_w = std::min(in_width - 1, std::max(out_w - pad_left, 0));
@@ -181,7 +181,7 @@ void ReplicatePad3DFuncNDHWC(const T* in_data,
                              const int in_depth,
                              const int in_height,
                              const int in_width,
-                             const int out_depth,
+                             const int out_depth UNUSED,
                              const int out_height,
                              const int out_width,
                              const int pad_front,
@@ -190,7 +190,7 @@ void ReplicatePad3DFuncNDHWC(const T* in_data,
                              const int out_d,
                              const int out_h,
                              const int out_w,
-                             const T value) {
+                             const T value UNUSED) {
   int in_d = std::min(in_depth - 1, std::max(out_d - pad_front, 0));
   int in_h = std::min(in_height - 1, std::max(out_h - pad_top, 0));
   int in_w = std::min(in_width - 1, std::max(out_w - pad_left, 0));
@@ -210,7 +210,7 @@ void CircularPad3DFuncNCDHW(const T* in_data,
                             const int in_depth,
                             const int in_height,
                             const int in_width,
-                            const int out_depth,
+                            const int out_depth UNUSED,
                             const int out_height,
                             const int out_width,
                             const int pad_front,
@@ -219,7 +219,7 @@ void CircularPad3DFuncNCDHW(const T* in_data,
                             const int out_d,
                             const int out_h,
                             const int out_w,
-                            const T value) {
+                            const T value UNUSED) {
   int in_d = ((out_d - pad_front) % in_depth + in_depth) % in_depth;
   int in_h = ((out_h - pad_top) % in_height + in_height) % in_height;
   int in_w = ((out_w - pad_left) % in_width + in_width) % in_width;
@@ -235,7 +235,7 @@ void CircularPad3DFuncNDHWC(const T* in_data,
                             const int in_depth,
                             const int in_height,
                             const int in_width,
-                            const int out_depth,
+                            const int out_depth UNUSED,
                             const int out_height,
                             const int out_width,
                             const int pad_front,
@@ -244,7 +244,7 @@ void CircularPad3DFuncNDHWC(const T* in_data,
                             const int out_d,
                             const int out_h,
                             const int out_w,
-                            const T value) {
+                            const T value UNUSED) {
   int in_d = ((out_d - pad_front) % in_depth + in_depth) % in_depth;
   int in_h = ((out_h - pad_top) % in_height + in_height) % in_height;
   int in_w = ((out_w - pad_left) % in_width + in_width) % in_width;
...
@@ -22,7 +22,7 @@ namespace phi {
 
 template <typename T, typename Context>
 void RollGradKernel(const Context& dev_ctx,
-                    const DenseTensor& x,
+                    const DenseTensor& x UNUSED,
                     const DenseTensor& out_grad,
                     const IntArray& shifts,
                     const std::vector<int64_t>& axis,
...
@@ -1112,7 +1112,7 @@ struct Expm1GradFunctor : public BaseActivationFunctor<T> {
             typename Out,
             typename dOut,
             typename dX>
-  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
+  void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
     dx.device(d) = dout * out + dout;
   }
 
@@ -1412,7 +1412,7 @@ struct LeakyReluGradFunctor : public BaseActivationFunctor<T> {
             typename Out,
             typename dOut,
             typename dX>
-  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
+  void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
     auto temp1 =
         static_cast<T>(alpha) * (x < static_cast<T>(0)).template cast<T>();
     auto temp2 = (x >= static_cast<T>(0)).template cast<T>();
...
@@ -288,7 +288,7 @@ template struct SelectedRowsSumTo<phi::CPUContext, double>;
 
 template <typename T>
 struct SelectedRowsAddToTensor<phi::CPUContext, T> {
-  void operator()(const phi::CPUContext& context,
+  void operator()(const phi::CPUContext& context UNUSED,
                   const phi::SelectedRows& input1,
                   phi::DenseTensor* input2) {
     if (UNLIKELY(input1.rows().size() == 0)) {
...
@@ -36,7 +36,7 @@ void SquaredL2Norm(const phi::CPUContext& ctx,
                    const T1* x,
                    T2* y,
                    size_t numel,
-                   memory_utils::Buffer* buffer = nullptr) {
+                   memory_utils::Buffer* buffer UNUSED = nullptr) {
   if (std::is_same<T1, T2>::value) {
     using EigenT = typename phi::EigenTensor<T1, 1>::Type;
     using ConstEigenT = typename phi::EigenTensor<T1, 1>::ConstType;
...
@@ -24,8 +24,8 @@ namespace phi {
 
 template <typename T, typename Context>
 void FillGradKernel(const Context& dev_ctx,
-                    const DenseTensor& out_grad,
-                    const Scalar& value,
+                    const DenseTensor& out_grad UNUSED,
+                    const Scalar& value UNUSED,
                     DenseTensor* in_grad) {
   if (in_grad) {
     dev_ctx.template Alloc<T>(in_grad);
...
@@ -20,7 +20,7 @@
 namespace phi {
 template <typename T, typename Context>
 void FrameGradKernel(const Context& dev_ctx,
-                     const DenseTensor& x,
+                     const DenseTensor& x UNUSED,
                      const DenseTensor& dout,
                      int frame_length,
                      int hop_length,
...
@@ -22,7 +22,7 @@ template <typename T, typename Context>
 void PadGradKernel(const Context& dev_ctx,
                    const DenseTensor& d_out,
                    const std::vector<int>& paddings,
-                   const Scalar& pad_value,
+                   const Scalar& pad_value UNUSED,
                    DenseTensor* d_x) {
   if (d_x == nullptr) {
     return;
...
@@ -82,7 +82,7 @@ struct TraceGradFunctor {
 
 template <typename T, typename Context>
 void TraceGradKernel(const Context& ctx,
-                     const DenseTensor& x,
+                     const DenseTensor& x UNUSED,
                      const DenseTensor& out_grad,
                      int offset,
                      int axis1,
...
@@ -165,7 +165,7 @@ void IndicesCooKernel(const Context& dev_ctx UNUSED,
 }
 
 template <typename T, typename Context>
-void SparseCooTensorKernel(const Context& dev_ctx,
+void SparseCooTensorKernel(const Context& dev_ctx UNUSED,
                            const DenseTensor& values,
                            const DenseTensor& indices,
                            const std::vector<int64_t>& shape,
...