Unverified commit ca2ea162, authored by Galaxy1458, committed by GitHub

remove some [-Wunused-parameter] warnings (#53679)

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop
Parent 96188fc1
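
Note: the pattern throughout this diff is to tag parameters that a given overload or instantiation never reads with the UNUSED macro, which silences -Wunused-parameter while leaving the signatures required by the shared kernel interfaces untouched. Below is a minimal, self-contained sketch of the idea; the macro name follows the diff, but this particular definition is an assumption about how such a macro is typically written, not a quote from the Paddle headers:

    // Hypothetical stand-in for the UNUSED annotation used in this diff.
    // On GCC/Clang the attribute marks the parameter as possibly unused;
    // on other compilers it expands to nothing.
    #if defined(__GNUC__) || defined(__clang__)
    #define UNUSED __attribute__((unused))
    #else
    #define UNUSED
    #endif

    // With -Wunused-parameter (implied by -Wextra), `y` would warn here;
    // the annotation suppresses the warning without changing the signature.
    static int FirstOnly(int x, int y UNUSED) { return x; }

    int main() { return FirstOnly(1, 2) == 1 ? 0 : 1; }

Since C++17 the standard [[maybe_unused]] attribute covers the same use case; a project-level macro is commonly kept for portability across older toolchains.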
@@ -339,7 +339,7 @@ static bool CompareTensor(const PaddleTensor &a, const PaddleTensor &b) {
 }
 static std::string DescribeTensor(const PaddleTensor &tensor,
-                                  int max_num_of_data = 15) {
+                                  int max_num_of_data UNUSED = 15) {
   std::stringstream os;
   os << "Tensor [" << tensor.name << "]\n";
   os << "  - type: ";
......
@@ -60,7 +60,7 @@ class AssignFunctor {
   }
   template <typename T>
-  void operator()(const T &v) const {
+  void operator()(const T &v UNUSED) const {
     PADDLE_ENFORCE_EQ(
         true,
         false,
......
@@ -1433,16 +1433,16 @@ struct KernelRegistrar {
       reg_type, kernel_name, layout, meta_kernel_fn, N, ...)             \
   static void __PD_KERNEL_args_def_FN_##kernel_name##_##layout(          \
       const ::phi::KernelKey& kernel_key, ::phi::Kernel* kernel);        \
-  PD_EXPAND(                                                             \
-      PD_CONCATENATE(_PD_FOR_ALL_BACKEND_DTYPE_, N)(                     \
-          reg_type,                                                      \
-          kernel_name,                                                   \
-          layout,                                                        \
-          meta_kernel_fn,                                                \
-          __PD_KERNEL_args_def_FN_##kernel_name##_##layout,              \
-          __VA_ARGS__) void                                              \
-          __PD_KERNEL_args_def_FN_##kernel_name##_##layout(              \
-              const ::phi::KernelKey& kernel_key, ::phi::Kernel* kernel))
+  PD_EXPAND(PD_CONCATENATE(_PD_FOR_ALL_BACKEND_DTYPE_, N)(               \
+      reg_type,                                                          \
+      kernel_name,                                                       \
+      layout,                                                            \
+      meta_kernel_fn,                                                    \
+      __PD_KERNEL_args_def_FN_##kernel_name##_##layout,                  \
+      __VA_ARGS__) void                                                  \
+      __PD_KERNEL_args_def_FN_##kernel_name##_##layout(                  \
+          const ::phi::KernelKey& kernel_key UNUSED,                     \
+          ::phi::Kernel* kernel UNUSED))
 #ifndef _WIN32
 #define ___PD_REGISTER_KERNEL_FOR_ALL_BACKEND_DTYPE(                     \
     reg_type, kernel_name, backend, layout, kernel_fn, args_def_fn)      \
......
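
The UNUSED annotations in the generated args-def signature above exist because the registration macro stamps out the same function shape for every kernel, and not every generated body reads kernel_key or kernel. A minimal sketch of that situation with hypothetical names (ArgsDef_, DEFINE_ARGS_DEF, and the struct definitions are illustrative, not Paddle's):

    #define UNUSED __attribute__((unused))  // assumed GCC/Clang-style definition

    struct KernelKey {};
    struct Kernel {};

    // One macro emits a definition per kernel name; a body that happens
    // not to touch `key` or `kernel` would otherwise warn under
    // -Wunused-parameter.
    #define DEFINE_ARGS_DEF(name)                        \
      void ArgsDef_##name(const KernelKey& key UNUSED,   \
                          Kernel* kernel UNUSED) { /* no-op */ }

    DEFINE_ARGS_DEF(concat)  // expands to: void ArgsDef_concat(...) { }

    int main() {
      KernelKey key;
      Kernel kernel;
      ArgsDef_concat(key, &kernel);  // generated function is callable as usual
      return 0;
    }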
@@ -28,7 +28,7 @@ template <typename T, typename Context>
 void CheckMemoryContinueKernel(const Context &dev_ctx,
                                const std::vector<const DenseTensor *> &input,
                                DenseTensor *output,
-                               std::vector<DenseTensor *> xout) {
+                               std::vector<DenseTensor *> xout UNUSED) {
   int64_t size_of_dtype = sizeof(T);
   auto dtype = input.at(0)->dtype();
   int64_t numel = 0;
......
@@ -52,7 +52,7 @@ template <typename T, typename Context, typename Reducer>
 void ScanKernel(const Context& dev_ctx,
                 const DenseTensor& x,
                 int axis,
-                bool flatten,
+                bool flatten UNUSED,
                 bool exclusive,
                 bool reverse,
                 Reducer reducer,
......
@@ -22,7 +22,7 @@ namespace phi {
 template <typename T, typename Context>
 void DiagonalGradKernel(const Context& dev_ctx,
-                        const DenseTensor& x,
+                        const DenseTensor& x UNUSED,
                         const DenseTensor& out_grad,
                         int offset,
                         int axis1,
......
@@ -75,7 +75,9 @@ void ElemwiseExplicitGradCompute(const CPUContext& dev_ctx,
  */
 template <typename T>
 struct IdentityGrad {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return dout; }
+  HOSTDEVICE T operator()(T x UNUSED, T y UNUSED, T out UNUSED, T dout) const {
+    return dout;
+  }
 };
 template <typename T>
@@ -120,12 +122,16 @@ ElementwiseAddGrad(const CPUContext& ctx,
 template <typename T>
 struct SubGradDX {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return dout; }
+  HOSTDEVICE T operator()(T x UNUSED, T y UNUSED, T out UNUSED, T dout) const {
+    return dout;
+  }
 };
 template <typename T>
 struct SubGradDY {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return -dout; }
+  HOSTDEVICE T operator()(T x UNUSED, T y UNUSED, T out UNUSED, T dout) const {
+    return -dout;
+  }
 };
 template <typename T>
......
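
These functors read only dout because the gradients are constant: for out = x + y, ∂out/∂x = ∂out/∂y = 1, and for out = x - y, ∂out/∂x = 1 and ∂out/∂y = -1, so dx = dout and dy = -dout. The x, y, and out parameters exist only to satisfy the common grad-functor interface, which is exactly the situation the UNUSED annotation is for.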
@@ -24,7 +24,7 @@ namespace phi {
 template <typename T, typename Context>
 void GatherGradKernel(const Context& dev_ctx,
-                      const DenseTensor& x,
+                      const DenseTensor& x UNUSED,
                       const DenseTensor& index,
                       const DenseTensor& out_grad,
                       const Scalar& axis,
......
@@ -21,7 +21,7 @@ namespace phi {
 template <typename T, typename Context>
 void OverlapAddGradKernel(const Context& dev_ctx,
-                          const DenseTensor& x,
+                          const DenseTensor& x UNUSED,
                           const DenseTensor& out_grad,
                           int hop_length,
                           int axis,
......
@@ -26,7 +26,7 @@ void ConstPad3DFuncNCDHW(const T* in_data,
                          const int in_depth,
                          const int in_height,
                          const int in_width,
-                         const int out_depth,
+                         const int out_depth UNUSED,
                          const int out_height,
                          const int out_width,
                          const int pad_front,
@@ -53,7 +53,7 @@ void ConstPad3DFuncNDHWC(const T* in_data,
                          const int in_depth,
                          const int in_height,
                          const int in_width,
-                         const int out_depth,
+                         const int out_depth UNUSED,
                          const int out_height,
                          const int out_width,
                          const int pad_front,
@@ -88,7 +88,7 @@ void ReflectPad3DFuncNCDHW(const T* in_data,
                            const int in_depth,
                            const int in_height,
                            const int in_width,
-                           const int out_depth,
+                           const int out_depth UNUSED,
                            const int out_height,
                            const int out_width,
                            const int pad_front,
@@ -97,7 +97,7 @@ void ReflectPad3DFuncNCDHW(const T* in_data,
                            const int out_d,
                            const int out_h,
                            const int out_w,
-                           const T value) {
+                           const T value UNUSED) {
   int in_d = out_d - pad_front;
   int in_h = out_h - pad_top;
   int in_w = out_w - pad_left;
@@ -120,7 +120,7 @@ void ReflectPad3DFuncNDHWC(const T* in_data,
                            const int in_depth,
                            const int in_height,
                            const int in_width,
-                           const int out_depth,
+                           const int out_depth UNUSED,
                            const int out_height,
                            const int out_width,
                            const int pad_front,
@@ -129,7 +129,7 @@ void ReflectPad3DFuncNDHWC(const T* in_data,
                            const int out_d,
                            const int out_h,
                            const int out_w,
-                           const T value) {
+                           const T value UNUSED) {
   int in_d = out_d - pad_front;
   int in_h = out_h - pad_top;
   int in_w = out_w - pad_left;
@@ -156,7 +156,7 @@ void ReplicatePad3DFuncNCDHW(const T* in_data,
                              const int in_depth,
                              const int in_height,
                              const int in_width,
-                             const int out_depth,
+                             const int out_depth UNUSED,
                              const int out_height,
                              const int out_width,
                              const int pad_front,
@@ -165,7 +165,7 @@ void ReplicatePad3DFuncNCDHW(const T* in_data,
                              const int out_d,
                              const int out_h,
                              const int out_w,
-                             const T value) {
+                             const T value UNUSED) {
   int in_d = std::min(in_depth - 1, std::max(out_d - pad_front, 0));
   int in_h = std::min(in_height - 1, std::max(out_h - pad_top, 0));
   int in_w = std::min(in_width - 1, std::max(out_w - pad_left, 0));
@@ -181,7 +181,7 @@ void ReplicatePad3DFuncNDHWC(const T* in_data,
                              const int in_depth,
                              const int in_height,
                              const int in_width,
-                             const int out_depth,
+                             const int out_depth UNUSED,
                              const int out_height,
                              const int out_width,
                              const int pad_front,
@@ -190,7 +190,7 @@ void ReplicatePad3DFuncNDHWC(const T* in_data,
                              const int out_d,
                              const int out_h,
                              const int out_w,
-                             const T value) {
+                             const T value UNUSED) {
   int in_d = std::min(in_depth - 1, std::max(out_d - pad_front, 0));
   int in_h = std::min(in_height - 1, std::max(out_h - pad_top, 0));
   int in_w = std::min(in_width - 1, std::max(out_w - pad_left, 0));
@@ -210,7 +210,7 @@ void CircularPad3DFuncNCDHW(const T* in_data,
                             const int in_depth,
                             const int in_height,
                             const int in_width,
-                            const int out_depth,
+                            const int out_depth UNUSED,
                             const int out_height,
                             const int out_width,
                             const int pad_front,
@@ -219,7 +219,7 @@ void CircularPad3DFuncNCDHW(const T* in_data,
                             const int out_d,
                             const int out_h,
                             const int out_w,
-                            const T value) {
+                            const T value UNUSED) {
   int in_d = ((out_d - pad_front) % in_depth + in_depth) % in_depth;
   int in_h = ((out_h - pad_top) % in_height + in_height) % in_height;
   int in_w = ((out_w - pad_left) % in_width + in_width) % in_width;
@@ -235,7 +235,7 @@ void CircularPad3DFuncNDHWC(const T* in_data,
                            const int in_depth,
                            const int in_height,
                            const int in_width,
-                           const int out_depth,
+                           const int out_depth UNUSED,
                            const int out_height,
                            const int out_width,
                            const int pad_front,
@@ -244,7 +244,7 @@ void CircularPad3DFuncNDHWC(const T* in_data,
                            const int out_d,
                            const int out_h,
                            const int out_w,
-                           const T value) {
+                           const T value UNUSED) {
   int in_d = ((out_d - pad_front) % in_depth + in_depth) % in_depth;
   int in_h = ((out_h - pad_top) % in_height + in_height) % in_height;
   int in_w = ((out_w - pad_left) % in_width + in_width) % in_width;
......
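
The same reasoning applies to the padding functors above: the index math maps each output coordinate back to an input coordinate using only the input extents and the pad offsets (e.g. in_d = out_d - pad_front), so out_depth is never consulted, and value is only meaningful for the constant-pad variants, leaving it unused in the reflect, replicate, and circular ones.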
@@ -22,7 +22,7 @@ namespace phi {
 template <typename T, typename Context>
 void RollGradKernel(const Context& dev_ctx,
-                    const DenseTensor& x,
+                    const DenseTensor& x UNUSED,
                     const DenseTensor& out_grad,
                     const IntArray& shifts,
                     const std::vector<int64_t>& axis,
......
@@ -1112,7 +1112,7 @@ struct Expm1GradFunctor : public BaseActivationFunctor<T> {
             typename Out,
             typename dOut,
             typename dX>
-  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
+  void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
     dx.device(d) = dout * out + dout;
   }
@@ -1412,7 +1412,7 @@ struct LeakyReluGradFunctor : public BaseActivationFunctor<T> {
             typename Out,
             typename dOut,
             typename dX>
-  void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
+  void operator()(Device d, X x, Out out UNUSED, dOut dout, dX dx) const {
     auto temp1 =
         static_cast<T>(alpha) * (x < static_cast<T>(0)).template cast<T>();
     auto temp2 = (x >= static_cast<T>(0)).template cast<T>();
......
@@ -288,7 +288,7 @@ template struct SelectedRowsSumTo<phi::CPUContext, double>;
 template <typename T>
 struct SelectedRowsAddToTensor<phi::CPUContext, T> {
-  void operator()(const phi::CPUContext& context,
+  void operator()(const phi::CPUContext& context UNUSED,
                   const phi::SelectedRows& input1,
                   phi::DenseTensor* input2) {
     if (UNLIKELY(input1.rows().size() == 0)) {
......
@@ -36,7 +36,7 @@ void SquaredL2Norm(const phi::CPUContext& ctx,
                    const T1* x,
                    T2* y,
                    size_t numel,
-                   memory_utils::Buffer* buffer = nullptr) {
+                   memory_utils::Buffer* buffer UNUSED = nullptr) {
   if (std::is_same<T1, T2>::value) {
     using EigenT = typename phi::EigenTensor<T1, 1>::Type;
     using ConstEigenT = typename phi::EigenTensor<T1, 1>::ConstType;
......
@@ -24,8 +24,8 @@ namespace phi {
 template <typename T, typename Context>
 void FillGradKernel(const Context& dev_ctx,
-                    const DenseTensor& out_grad,
-                    const Scalar& value,
+                    const DenseTensor& out_grad UNUSED,
+                    const Scalar& value UNUSED,
                     DenseTensor* in_grad) {
   if (in_grad) {
     dev_ctx.template Alloc<T>(in_grad);
......
@@ -20,7 +20,7 @@
 namespace phi {
 template <typename T, typename Context>
 void FrameGradKernel(const Context& dev_ctx,
-                     const DenseTensor& x,
+                     const DenseTensor& x UNUSED,
                      const DenseTensor& dout,
                      int frame_length,
                      int hop_length,
......
@@ -22,7 +22,7 @@ template <typename T, typename Context>
 void PadGradKernel(const Context& dev_ctx,
                    const DenseTensor& d_out,
                    const std::vector<int>& paddings,
-                   const Scalar& pad_value,
+                   const Scalar& pad_value UNUSED,
                    DenseTensor* d_x) {
   if (d_x == nullptr) {
     return;
......
@@ -82,7 +82,7 @@ struct TraceGradFunctor {
 template <typename T, typename Context>
 void TraceGradKernel(const Context& ctx,
-                     const DenseTensor& x,
+                     const DenseTensor& x UNUSED,
                      const DenseTensor& out_grad,
                      int offset,
                      int axis1,
......
@@ -165,7 +165,7 @@ void IndicesCooKernel(const Context& dev_ctx UNUSED,
 }
 template <typename T, typename Context>
-void SparseCooTensorKernel(const Context& dev_ctx,
+void SparseCooTensorKernel(const Context& dev_ctx UNUSED,
                            const DenseTensor& values,
                            const DenseTensor& indices,
                            const std::vector<int64_t>& shape,
......