Unverified commit 96188fc1, authored by Galaxy1458, committed by GitHub

remove some [-Wunused-parameter] warnings (#53681)

* test,test=develop
Parent: a9c3e32d
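For context, the fix pattern throughout this commit is to annotate intentionally ignored parameters with an UNUSED macro so that -Wunused-parameter (enabled by -Wall -Wextra) stays quiet at the declaration site. Below is a minimal sketch of how such a macro is commonly defined, assuming GCC/Clang attribute semantics; the exact definition in Paddle's own macro header may differ.

// Minimal sketch, not Paddle's actual header: expand UNUSED to the
// compiler attribute that marks a declaration as intentionally unused.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED  // no-op on compilers without the attribute
#endif

Annotating the parameter itself, rather than casting it to void in the body, keeps the signature self-documenting and works for overrides and template specializations that must keep a fixed interface.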
@@ -28,7 +28,7 @@ namespace operators {
 template <typename T, typename DeviceContext>
 class SendOpV2CPUKernel : public framework::OpKernel<T> {
  public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
     PADDLE_THROW(platform::errors::Unavailable(
         "Do not support send for cpu kernel now."));
   }
@@ -33,7 +33,7 @@ class GRUMKLDNNHandler : public RNNMKLDNNHandler<T, dnnl::gru_forward, T_out> {
   GRUMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
                    const OneDNNContext& dev_ctx,
                    const dnnl::engine onednn_engine,
-                   platform::Place cpu_place,
+                   platform::Place cpu_place UNUSED,
                    const phi::DenseTensor* input,
                    const phi::DenseTensor* weight_h,
                    const phi::DenseTensor* h0,
@@ -42,7 +42,7 @@ class GRUMKLDNNHandler : public RNNMKLDNNHandler<T, dnnl::gru_forward, T_out> {
                    const int64_t Ti,
                    const int64_t IC,
                    const int64_t OC,
-                   const std::string& unique_name)
+                   const std::string& unique_name UNUSED)
       : RNNMKLDNNHandler<T, dnnl::gru_forward, T_out>(
             ctx,
             dev_ctx,
@@ -33,17 +33,17 @@ class LSTMMKLDNNHandler
   LSTMMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
                     const OneDNNContext& dev_ctx,
                     const dnnl::engine onednn_engine,
-                    platform::Place cpu_place,
+                    platform::Place cpu_place UNUSED,
                     const phi::DenseTensor* input,
                     const phi::DenseTensor* weight_h,
                     const phi::DenseTensor* h0,
-                    const phi::DenseTensor* c0,
+                    const phi::DenseTensor* c0 UNUSED,
                     const bool is_reverse,
                     const int64_t N,
                     const int64_t Ti,
                     const int64_t IC,
                     const int64_t OC,
-                    const std::string& unique_name)
+                    const std::string& unique_name UNUSED)
       : RNNMKLDNNHandler<T, dnnl::lstm_forward, T_out>(
             ctx,
             dev_ctx,
@@ -66,7 +66,7 @@ class MemcpyH2DFunctor {
   }
   template <typename T>
-  void operator()(const T &v) const {
+  void operator()(const T &v UNUSED) const {
     PADDLE_ENFORCE_EQ(
         true,
         false,
@@ -81,7 +81,7 @@ class MemcpyFunctor {
   }
   template <typename T>
-  void operator()(const T &v) const {
+  void operator()(const T &v UNUSED) const {
     PADDLE_ENFORCE_EQ(
         true,
         false,
@@ -318,11 +318,11 @@ void Copy(const Context& dev_ctx,
 }
 template <typename Context>
-void Copy(const Context& dev_ctx,
-          const TensorArray& src,
-          Place dst_place,
-          bool blocking,
-          TensorArray* dst) {
+void Copy(const Context& dev_ctx UNUSED,
+          const TensorArray& src UNUSED,
+          Place dst_place UNUSED,
+          bool blocking UNUSED,
+          TensorArray* dst UNUSED) {
   // NOTE(Ruibiao): implements Copy() for TensorArray when needed.
   PADDLE_THROW(errors::Unimplemented("Copy for TensorArray is unimplemented."));
 }
@@ -23,7 +23,7 @@ namespace phi {
 template <typename T, typename Context>
 void DiagGradKernel(const Context& dev_ctx,
-                    const DenseTensor& x,
+                    const DenseTensor& x UNUSED,
                     const DenseTensor& out_grad,
                     int offset,
                     DenseTensor* x_grad) {
@@ -22,7 +22,7 @@ namespace phi {
 template <typename T, typename Context>
 void FillDiagonalGradKernel(const Context& ctx,
                             const DenseTensor& out_grad,
-                            float value,
+                            float value UNUSED,
                             int offset,
                             bool wrap,
                             DenseTensor* x_grad) {
@@ -23,7 +23,7 @@ namespace phi {
 template <typename T, typename Context>
 void GatherNdGradKernel(const Context &ctx,
-                        const DenseTensor &x,
+                        const DenseTensor &x UNUSED,
                         const DenseTensor &index,
                         const DenseTensor &out_grad,
                         DenseTensor *x_grad) {
@@ -23,7 +23,7 @@ namespace phi {
 template <typename T, typename Context>
 void IndexAddGradKernel(const Context& ctx,
                         const DenseTensor& index,
-                        const DenseTensor& add_value,
+                        const DenseTensor& add_value UNUSED,
                         const DenseTensor& out_grad,
                         int axis,
                         DenseTensor* x_grad,
@@ -22,7 +22,7 @@ namespace phi {
 template <typename T, typename Context>
 void IndexSelectGradKernel(const Context& ctx,
-                           const DenseTensor& x,
+                           const DenseTensor& x UNUSED,
                            const DenseTensor& index,
                            const DenseTensor& out_grad,
                            int dim,
@@ -21,7 +21,7 @@ namespace phi {
 template <typename T, typename Context>
 void MeanAllGradKernel(const Context& dev_ctx,
-                       const DenseTensor& x,
+                       const DenseTensor& x UNUSED,
                        const DenseTensor& out_grad,
                        DenseTensor* x_grad) {
   PADDLE_ENFORCE_EQ(out_grad.numel(),
@@ -25,11 +25,11 @@ namespace phi {
 template <typename T, typename Context>
 void PutAlongAxisGradKernel(const Context& dev_ctx,
-                            const DenseTensor& x,
+                            const DenseTensor& x UNUSED,
                             const DenseTensor& index,
                             const DenseTensor& out_grad,
                             int axis,
-                            const std::string& reduce,
+                            const std::string& reduce UNUSED,
                             DenseTensor* x_grad,
                             DenseTensor* value_grad) {
   PADDLE_ENFORCE_EQ(
@@ -24,7 +24,7 @@ namespace phi {
 template <typename T, typename Context>
 void ScatterNdAddGradKernel(const Context &ctx,
                             const DenseTensor &index,
-                            const DenseTensor &updates,
+                            const DenseTensor &updates UNUSED,
                             const DenseTensor &out_grad,
                             DenseTensor *x_grad,
                             DenseTensor *updates_grad) {
@@ -364,13 +364,18 @@ void ElementwiseFMinGradKernel(const Context& dev_ctx,
 template <typename T>
 struct MulGradDX {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return dout * y; }
+  HOSTDEVICE T operator()(T x UNUSED, T y, T out UNUSED, T dout) const {
+    return dout * y;
+  }
 };
 // avoid [-Wint-in-bool-context] warning
 template <>
 struct MulGradDX<bool> {
-  HOSTDEVICE bool operator()(bool x, bool y, bool out, bool dout) const {
+  HOSTDEVICE bool operator()(bool x UNUSED,
+                             bool y,
+                             bool out UNUSED,
+                             bool dout) const {
     return dout && y;
   }
 };
@@ -395,13 +400,18 @@ struct MulGradDX<phi::dtype::complex<T>> {
 template <typename T>
 struct MulGradDY {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return dout * x; }
+  HOSTDEVICE T operator()(T x, T y UNUSED, T out UNUSED, T dout) const {
+    return dout * x;
+  }
 };
 // avoid [-Wint-in-bool-context] warning
 template <>
 struct MulGradDY<bool> {
-  HOSTDEVICE bool operator()(bool x, bool y, bool out, bool dout) const {
+  HOSTDEVICE bool operator()(bool x,
+                             bool y UNUSED,
+                             bool out UNUSED,
+                             bool dout) const {
     return dout && x;
   }
 };
@@ -826,14 +836,14 @@ void MultiplyTripleGradKernel(const Context& dev_ctx,
 template <typename T>
 struct MaxGradDx {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
     return dout * static_cast<T>(x > y);
   }
 };
 template <typename T>
 struct MaxGradDy {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
     return dout * static_cast<T>(x <= y);
   }
 };
@@ -845,14 +855,14 @@ struct MaxGradDy {
 */
 template <typename T>
 struct MinGradDx {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
     return dout * static_cast<T>(x < y);
   }
 };
 template <typename T>
 struct MinGradDy {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y, T out UNUSED, T dout) const {
     return dout * static_cast<T>(x >= y);
   }
 };
@@ -924,14 +934,14 @@ compute_pow_grad_dy(T x, T y, T out, T dout) {
 }
 #else
 template <typename T, typename MPType>
-HOSTDEVICE T compute_pow_grad_dx(T x, T y, T out, T dout) {
+HOSTDEVICE T compute_pow_grad_dx(T x, T y, T out UNUSED, T dout) {
   MPType x_val = static_cast<MPType>(x);
   MPType y_val = static_cast<MPType>(y);
   return static_cast<T>(static_cast<MPType>(dout) * y_val *
                         std::pow(x_val, y_val - 1));
 }
 template <typename T, typename MPType>
-HOSTDEVICE T compute_pow_grad_dy(T x, T y, T out, T dout) {
+HOSTDEVICE T compute_pow_grad_dy(T x, T y, T out UNUSED, T dout) {
   MPType x_val = static_cast<MPType>(x);
   MPType y_val = static_cast<MPType>(y);
   return static_cast<T>(static_cast<MPType>(dout) * std::log(x_val) *
@@ -37,7 +37,7 @@ template <typename T, typename Context>
 void EyeKernel(const Context& ctx,
                const Scalar& num_rows,
                const Scalar& num_columns,
-               DataType dtype,
+               DataType dtype UNUSED,
                DenseTensor* out) {
   auto columns = num_columns.to<int64_t>();
   auto rows = num_rows.to<int64_t>();
@@ -24,7 +24,7 @@ namespace phi {
 template <typename T, typename Context>
 void FillKernel(const Context& dev_ctx,
-                const DenseTensor& x,
+                const DenseTensor& x UNUSED,
                 const Scalar& value,
                 DenseTensor* out) {
   T fill_var = value.to<T>();
@@ -239,8 +239,8 @@ void NearestInterpKernel(
     int out_w,
     const std::vector<float>& scale,
     const std::string& interp_method,
-    bool align_corners,
-    int align_mode,
+    bool align_corners UNUSED,
+    int align_mode UNUSED,
     DenseTensor* output) {
   InterpolateKernel<T, Context>(ctx,
                                 x,
@@ -39,7 +39,7 @@ void Unsqueeze(const Context& dev_ctx,
                const DenseTensor& x,
                const IntArray& axes,
                DenseTensor* out,
-               DenseTensor* xshape) {
+               DenseTensor* xshape UNUSED) {
   MetaTensor meta_out(out);
   UnsqueezeInferMeta(x, axes, &meta_out);
   UnsqueezeInferKernel<T, Context>(dev_ctx, x, axes, out);
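As a self-contained illustration of the pattern applied in the hunks above (the function and program here are hypothetical, not part of this commit), the following compiles cleanly under -Wall -Wextra once the ignored parameter is annotated:

#include <cstdio>

#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// 'context' is kept for interface compatibility but deliberately ignored;
// without UNUSED, -Wunused-parameter would flag it.
static int AddWithContext(int a, int b, void* context UNUSED) { return a + b; }

int main() {
  std::printf("%d\n", AddWithContext(1, 2, nullptr));  // prints 3
  return 0;
}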