Unverified commit 96188fc1, authored by Galaxy1458 and committed by GitHub

remove some [-Wunused-parameter] warnings (#53681)

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop
Parent: a9c3e32d
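
Every hunk below applies the same fix: a parameter that must stay in the signature (to satisfy an operator interface, a kernel registration, or a template contract) but is never read in the body is tagged with the UNUSED macro, which silences -Wunused-parameter under -Wall -Wextra. A minimal standalone sketch of the pattern, assuming a GCC/Clang-style attribute (the macro definition here is a hypothetical stand-in; Paddle defines its own UNUSED in its core headers):

    // Hypothetical stand-in for the project's macro: on GCC/Clang the
    // "unused" attribute marks a parameter as intentionally unreferenced.
    #if defined(__GNUC__) || defined(__clang__)
    #define UNUSED __attribute__((unused))
    #else
    #define UNUSED
    #endif

    // Without the tag, -Wall -Wextra flags `ctx` with -Wunused-parameter.
    void Compute(int ctx UNUSED) {
      // The body never reads ctx; the parameter exists only to match an
      // interface, and the attribute documents that this is deliberate.
    }
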
@@ -28,7 +28,7 @@ namespace operators {
 template <typename T, typename DeviceContext>
 class SendOpV2CPUKernel : public framework::OpKernel<T> {
  public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
     PADDLE_THROW(platform::errors::Unavailable(
         "Do not support send for cpu kernel now."));
   }
...
@@ -33,7 +33,7 @@ class GRUMKLDNNHandler : public RNNMKLDNNHandler<T, dnnl::gru_forward, T_out> {
   GRUMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
                    const OneDNNContext& dev_ctx,
                    const dnnl::engine onednn_engine,
-                   platform::Place cpu_place,
+                   platform::Place cpu_place UNUSED,
                    const phi::DenseTensor* input,
                    const phi::DenseTensor* weight_h,
                    const phi::DenseTensor* h0,
@@ -42,7 +42,7 @@ class GRUMKLDNNHandler : public RNNMKLDNNHandler<T, dnnl::gru_forward, T_out> {
                    const int64_t Ti,
                    const int64_t IC,
                    const int64_t OC,
-                   const std::string& unique_name)
+                   const std::string& unique_name UNUSED)
       : RNNMKLDNNHandler<T, dnnl::gru_forward, T_out>(
             ctx,
             dev_ctx,
...
@@ -33,17 +33,17 @@ class LSTMMKLDNNHandler
   LSTMMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
                     const OneDNNContext& dev_ctx,
                     const dnnl::engine onednn_engine,
-                    platform::Place cpu_place,
+                    platform::Place cpu_place UNUSED,
                     const phi::DenseTensor* input,
                     const phi::DenseTensor* weight_h,
                     const phi::DenseTensor* h0,
-                    const phi::DenseTensor* c0,
+                    const phi::DenseTensor* c0 UNUSED,
                     const bool is_reverse,
                     const int64_t N,
                     const int64_t Ti,
                     const int64_t IC,
                     const int64_t OC,
-                    const std::string& unique_name)
+                    const std::string& unique_name UNUSED)
       : RNNMKLDNNHandler<T, dnnl::lstm_forward, T_out>(
             ctx,
             dev_ctx,
...
@@ -66,7 +66,7 @@ class MemcpyH2DFunctor {
   }

   template <typename T>
-  void operator()(const T &v) const {
+  void operator()(const T &v UNUSED) const {
     PADDLE_ENFORCE_EQ(
         true,
         false,
...
@@ -81,7 +81,7 @@ class MemcpyFunctor {
   }

   template <typename T>
-  void operator()(const T &v) const {
+  void operator()(const T &v UNUSED) const {
     PADDLE_ENFORCE_EQ(
         true,
         false,
...
@@ -318,11 +318,11 @@ void Copy(const Context& dev_ctx,
 }

 template <typename Context>
-void Copy(const Context& dev_ctx,
-          const TensorArray& src,
-          Place dst_place,
-          bool blocking,
-          TensorArray* dst) {
+void Copy(const Context& dev_ctx UNUSED,
+          const TensorArray& src UNUSED,
+          Place dst_place UNUSED,
+          bool blocking UNUSED,
+          TensorArray* dst UNUSED) {
   // NOTE(Ruibiao): implements Copy() for TensorArray when needed.
   PADDLE_THROW(errors::Unimplemented("Copy for TensorArray is unimplemented."));
 }
...
@@ -23,7 +23,7 @@ namespace phi {
 template <typename T, typename Context>
 void DiagGradKernel(const Context& dev_ctx,
-                    const DenseTensor& x,
+                    const DenseTensor& x UNUSED,
                     const DenseTensor& out_grad,
                     int offset,
                     DenseTensor* x_grad) {
...
@@ -22,7 +22,7 @@ namespace phi {
 template <typename T, typename Context>
 void FillDiagonalGradKernel(const Context& ctx,
                             const DenseTensor& out_grad,
-                            float value,
+                            float value UNUSED,
                             int offset,
                             bool wrap,
                             DenseTensor* x_grad) {
...
@@ -23,7 +23,7 @@ namespace phi {
 template <typename T, typename Context>
 void GatherNdGradKernel(const Context &ctx,
-                        const DenseTensor &x,
+                        const DenseTensor &x UNUSED,
                         const DenseTensor &index,
                         const DenseTensor &out_grad,
                         DenseTensor *x_grad) {
...
@@ -23,7 +23,7 @@ namespace phi {
 template <typename T, typename Context>
 void IndexAddGradKernel(const Context& ctx,
                         const DenseTensor& index,
-                        const DenseTensor& add_value,
+                        const DenseTensor& add_value UNUSED,
                         const DenseTensor& out_grad,
                         int axis,
                         DenseTensor* x_grad,
...
@@ -22,7 +22,7 @@ namespace phi {
 template <typename T, typename Context>
 void IndexSelectGradKernel(const Context& ctx,
-                           const DenseTensor& x,
+                           const DenseTensor& x UNUSED,
                            const DenseTensor& index,
                            const DenseTensor& out_grad,
                            int dim,
...
@@ -21,7 +21,7 @@ namespace phi {
 template <typename T, typename Context>
 void MeanAllGradKernel(const Context& dev_ctx,
-                       const DenseTensor& x,
+                       const DenseTensor& x UNUSED,
                        const DenseTensor& out_grad,
                        DenseTensor* x_grad) {
   PADDLE_ENFORCE_EQ(out_grad.numel(),
...
@@ -25,11 +25,11 @@ namespace phi {

 template <typename T, typename Context>
 void PutAlongAxisGradKernel(const Context& dev_ctx,
-                            const DenseTensor& x,
+                            const DenseTensor& x UNUSED,
                             const DenseTensor& index,
                             const DenseTensor& out_grad,
                             int axis,
-                            const std::string& reduce,
+                            const std::string& reduce UNUSED,
                             DenseTensor* x_grad,
                             DenseTensor* value_grad) {
   PADDLE_ENFORCE_EQ(
...
@@ -24,7 +24,7 @@ namespace phi {
 template <typename T, typename Context>
 void ScatterNdAddGradKernel(const Context &ctx,
                             const DenseTensor &index,
-                            const DenseTensor &updates,
+                            const DenseTensor &updates UNUSED,
                             const DenseTensor &out_grad,
                             DenseTensor *x_grad,
                             DenseTensor *updates_grad) {
...
@@ -364,13 +364,18 @@ void ElementwiseFMinGradKernel(const Context& dev_ctx,
 template <typename T>
 struct MulGradDX {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return dout * y; }
+  HOSTDEVICE T operator()(T x UNUSED, T y, T out UNUSED, T dout) const {
+    return dout * y;
+  }
 };

 // avoid [-Wint-in-bool-context] warning
 template <>
 struct MulGradDX<bool> {
-  HOSTDEVICE bool operator()(bool x, bool y, bool out, bool dout) const {
+  HOSTDEVICE bool operator()(bool x UNUSED,
+                             bool y,
+                             bool out UNUSED,
+                             bool dout) const {
     return dout && y;
   }
 };
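
The bool specialization above exists because `dout * y` on bool operands promotes both to int, and converting the integer product back to bool trips GCC's [-Wint-in-bool-context]; `dout && y` computes the same truth table without the promotion. A tiny repro sketch (hypothetical standalone functions, not Paddle code):

    bool grad_via_mul(bool y, bool dout) {
      return dout * y;   // GCC: '*' in boolean context, suggest '&&' instead
    }
    bool grad_via_and(bool y, bool dout) {
      return dout && y;  // identical result for bool operands, no warning
    }
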
@@ -395,13 +400,18 @@ struct MulGradDX<phi::dtype::complex<T>> {
 template <typename T>
 struct MulGradDY {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return dout * x; }
+  HOSTDEVICE T operator()(T x, T y UNUSED, T out UNUSED, T dout) const {
+    return dout * x;
+  }
 };

 // avoid [-Wint-in-bool-context] warning
 template <>
 struct MulGradDY<bool> {
-  HOSTDEVICE bool operator()(bool x, bool y, bool out, bool dout) const {
+  HOSTDEVICE bool operator()(bool x,
+                             bool y UNUSED,
+                             bool out UNUSED,
+                             bool dout) const {
     return dout && x;
   }
 };
@@ -826,14 +836,14 @@ void MultiplyTripleGradKernel(const Context& dev_ctx,
 template <typename T>
 struct MaxGradDx {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
     return dout * static_cast<T>(x > y);
   }
 };

 template <typename T>
 struct MaxGradDy {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
     return dout * static_cast<T>(x <= y);
   }
 };
@@ -845,14 +855,14 @@ struct MaxGradDy {
 */
 template <typename T>
 struct MinGradDx {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
     return dout * static_cast<T>(x < y);
   }
 };

 template <typename T>
 struct MinGradDy {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
     return dout * static_cast<T>(x >= y);
   }
 };
@@ -924,14 +934,14 @@ compute_pow_grad_dy(T x, T y, T out, T dout) {
 }
 #else
 template <typename T, typename MPType>
-HOSTDEVICE T compute_pow_grad_dx(T x, T y, T out, T dout) {
+HOSTDEVICE T compute_pow_grad_dx(T x, T y, T out UNUSED, T dout) {
   MPType x_val = static_cast<MPType>(x);
   MPType y_val = static_cast<MPType>(y);
   return static_cast<T>(static_cast<MPType>(dout) * y_val *
                         std::pow(x_val, y_val - 1));
 }

 template <typename T, typename MPType>
-HOSTDEVICE T compute_pow_grad_dy(T x, T y, T out, T dout) {
+HOSTDEVICE T compute_pow_grad_dy(T x, T y, T out UNUSED, T dout) {
   MPType x_val = static_cast<MPType>(x);
   MPType y_val = static_cast<MPType>(y);
   return static_cast<T>(static_cast<MPType>(dout) * std::log(x_val) *
...
@@ -37,7 +37,7 @@ template <typename T, typename Context>
 void EyeKernel(const Context& ctx,
                const Scalar& num_rows,
                const Scalar& num_columns,
-               DataType dtype,
+               DataType dtype UNUSED,
                DenseTensor* out) {
   auto columns = num_columns.to<int64_t>();
   auto rows = num_rows.to<int64_t>();
...
@@ -24,7 +24,7 @@ namespace phi {
 template <typename T, typename Context>
 void FillKernel(const Context& dev_ctx,
-                const DenseTensor& x,
+                const DenseTensor& x UNUSED,
                 const Scalar& value,
                 DenseTensor* out) {
   T fill_var = value.to<T>();
...
@@ -239,8 +239,8 @@ void NearestInterpKernel(
     int out_w,
     const std::vector<float>& scale,
     const std::string& interp_method,
-    bool align_corners,
-    int align_mode,
+    bool align_corners UNUSED,
+    int align_mode UNUSED,
     DenseTensor* output) {
   InterpolateKernel<T, Context>(ctx,
                                 x,
...
@@ -39,7 +39,7 @@ void Unsqueeze(const Context& dev_ctx,
                const DenseTensor& x,
                const IntArray& axes,
                DenseTensor* out,
-               DenseTensor* xshape) {
+               DenseTensor* xshape UNUSED) {
   MetaTensor meta_out(out);
   UnsqueezeInferMeta(x, axes, &meta_out);
   UnsqueezeInferKernel<T, Context>(dev_ctx, x, axes, out);
...