Unverified commit dbb62692, authored by Galaxy1458, committed by GitHub

remove some [-Wunused-parameter] warning (#53683)

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop
Parent: 2f56b6da
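The change is mechanical: every parameter that a kernel signature must declare but never reads is annotated with Paddle's UNUSED macro, so builds with -Wunused-parameter stay quiet without dropping the parameter from the interface. The header that defines UNUSED is not part of this diff; the sketch below shows the conventional definition such a macro has on GCC/Clang, offered as an assumption rather than Paddle's exact source.

// Minimal sketch of an UNUSED-style annotation macro (assumed definition;
// the defining header is not shown in this diff).
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// Usage: the parameter must stay in the signature because the interface
// fixes it, but the attribute marks the non-use as intentional.
void Compute(int ctx UNUSED) {}  // no -Wunused-parameter diagnostic

Since C++17 the standard attribute [[maybe_unused]] expresses the same intent; a project-wide macro keeps a single spelling across all toolchains the codebase supports.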
@@ -28,7 +28,7 @@ namespace operators {
 template <typename T, typename DeviceContext>
 class GlobalGatherOpCPUKernel : public framework::OpKernel<T> {
  public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
     PADDLE_THROW(platform::errors::Unavailable(
         "Do not support global gather op for cpu kernel now."));
   }
@@ -28,7 +28,7 @@ namespace operators {
 template <typename T, typename DeviceContext>
 class GlobalScatterOpCPUKernel : public framework::OpKernel<T> {
  public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
     PADDLE_THROW(platform::errors::Unavailable(
         "Do not support global scatter op for cpu kernel now."));
   }
@@ -29,7 +29,7 @@ namespace operators {
 template <typename T, typename DeviceContext>
 class PartialAllGatherOpCPUKernel : public framework::OpKernel<T> {
  public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
     PADDLE_THROW(platform::errors::Unavailable(
         "Do not support partial_allgather for cpu kernel now."));
   }
@@ -27,7 +27,7 @@ namespace operators {
 template <typename T, typename DeviceContext>
 class PartialRecvOpCPUKernel : public framework::OpKernel<T> {
  public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
     PADDLE_THROW(platform::errors::Unavailable(
         "Do not support partial_recv for cpu kernel now."));
   }
@@ -28,7 +28,7 @@ namespace operators {
 template <typename T, typename DeviceContext>
 class PartialSendOpCPUKernel : public framework::OpKernel<T> {
  public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
     PADDLE_THROW(platform::errors::Unavailable(
         "Do not support partial_send for cpu kernel now."));
   }
@@ -27,7 +27,7 @@ namespace operators {
 template <typename T, typename DeviceContext>
 class RecvOpV2CPUKernel : public framework::OpKernel<T> {
  public:
-  void Compute(const framework::ExecutionContext& ctx) const override {
+  void Compute(const framework::ExecutionContext& ctx UNUSED) const override {
     PADDLE_THROW(platform::errors::Unavailable(
         "Do not support recv for cpu kernel now."));
   }
@@ -27,8 +27,8 @@ template <typename T, typename Context>
 void EigvalshGradKernel(const Context& dev_ctx,
                         const DenseTensor& out_v,
                         const DenseTensor& out_w_grad,
-                        const std::string& uplo,
-                        bool is_test,
+                        const std::string& uplo UNUSED,
+                        bool is_test UNUSED,
                         DenseTensor* x_grad) {
   auto tV = phi::TransposeLast2Dim<T>(dev_ctx, phi::Conj<T>(dev_ctx, out_v));
@@ -752,7 +752,7 @@ void EinsumKernel(const Context& dev_ctx,
                   const std::string& equation,
                   DenseTensor* out,
                   std::vector<DenseTensor*> cache,
-                  std::vector<DenseTensor*> xshape) {
+                  std::vector<DenseTensor*> xshape UNUSED) {
   std::vector<char> tmp;
   // for the sake of compatibility, we may load and run v2.3 EinsumOp. Output
   // may have nullptr and the cache.size() is not equal to inputs.size(). refer
@@ -119,7 +119,9 @@ void SubtractDoubleGradImpl(const Context& dev_ctx,
 
 template <typename T>
 struct DivGradDX {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return dout / y; }
+  HOSTDEVICE T operator()(T x UNUSED, T y, T out UNUSED, T dout) const {
+    return dout / y;
+  }
 };
 
 template <typename T>
@@ -136,7 +138,7 @@ struct DivGradDX<phi::dtype::complex<T>> {
 
 template <typename T>
 struct DivGradDY {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x UNUSED, T y, T out, T dout) const {
     return -dout * out / y;
   }
 };
@@ -857,14 +859,14 @@ struct MinGradDy {
 
 template <typename T>
 struct HeavisideGradDx {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x UNUSED, T y UNUSED, T out UNUSED, T dout) const {
     return dout * static_cast<T>(0);
   }
 };
 
 template <typename T>
 struct HeavisideGradDy {
-  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
+  HOSTDEVICE T operator()(T x, T y UNUSED, T out UNUSED, T dout) const {
     return dout * static_cast<T>(x == static_cast<T>(0));
   }
 };
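Note that the unused x, y, and out parameters in these functors cannot simply be removed: all elementwise GradDX/GradDY functors share one call shape so that a single generic loop can invoke any of them. A hypothetical driver illustrating that constraint (the names below are illustrative, not Paddle's actual internals):

// Hypothetical sketch: a generic elementwise-gradient loop that calls every
// functor with the uniform (x, y, out, dout) argument list. This is why
// DivGradDX and the Heaviside functors keep parameters they never read.
template <typename T, typename GradFunctor>
void ElementwiseGradLoop(const T* x, const T* y, const T* out, const T* dout,
                         T* dx, int n, GradFunctor f) {
  for (int i = 0; i < n; ++i) {
    dx[i] = f(x[i], y[i], out[i], dout[i]);  // same shape for all functors
  }
}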
@@ -128,7 +128,7 @@ void ComputeImpl(const Context& dev_ctx,
                  float beta1_f,
                  float beta2_f,
                  float epsilon_f,
-                 bool multi_precision,
+                 bool multi_precision UNUSED,
                  DenseTensor* param_out,
                  DenseTensor* mom1_out,
                  DenseTensor* mom2_out,
@@ -474,7 +474,7 @@ void Unpack_Pivot(const Context& dev_ctx,
                   const DenseTensor& Pivot,
                   DenseTensor* P,
                   int h,
-                  int w) {
+                  int w UNUSED) {
   auto dims = Pivot.dims();
   auto Pdimvec = vectorize(dims);
   auto prank = Pdimvec.size();
@@ -26,7 +26,7 @@ template <typename T, typename Context>
 void UnStackKernel(const Context &dev_ctx,
                    const DenseTensor &x,
                    int axis,
-                   int num,
+                   int num UNUSED,
                    std::vector<DenseTensor *> outs) {
   auto *dy = &x;
   auto dx = outs;
@@ -240,10 +240,10 @@ class ConvOneDNNHandlerT
                      const std::string& padding_algorithm,
                      const std::vector<int>& dilations_in,
                      int groups,
-                     const std::string& data_format,
+                     const std::string& data_format UNUSED,
                      bool is_test,
-                     phi::DenseTensor* filter_grad,
-                     phi::DenseTensor* in_x_grad,
+                     phi::DenseTensor* filter_grad UNUSED,
+                     phi::DenseTensor* in_x_grad UNUSED,
                      const std::string& unique_name)
       : funcs::OneDNNHandlerT<T,
                               dnnl::convolution_forward,
@@ -26,7 +26,7 @@ void Pool2dKernel(const Context& dev_ctx,
                   const std::vector<int>& paddings,
                   bool ceil_mode,
                   bool exclusive,
-                  const std::string& data_format,
+                  const std::string& data_format UNUSED,
                   const std::string& pooling_type,
                   bool global_pooling,
                   bool adaptive,
@@ -118,7 +118,7 @@ void ReduceGradKernel(const Context& dev_ctx,
                       bool reduce_all,
                       DenseTensor* x_grad,
                       dnnl::algorithm binary_type,
-                      dnnl::algorithm reduction_type,
+                      dnnl::algorithm reduction_type UNUSED,
                       float scale_x,
                       float scale_y) {
   reduce_all = recompute_reduce_all(x, dims, reduce_all);
@@ -25,7 +25,7 @@ void SliceKernel(const Context& dev_ctx,
                  const std::vector<int64_t>& axes,
                  const IntArray& starts,
                  const IntArray& ends,
-                 const std::vector<int64_t>& infer_flags,
+                 const std::vector<int64_t>& infer_flags UNUSED,
                  const std::vector<int64_t>& decrease_axis,
                  DenseTensor* out) {
   const auto& onednn_engine = dev_ctx.GetEngine();
@@ -131,7 +131,7 @@ void ComputeRowImpl(const Context& dev_ctx,
                     float beta1_f,
                     float beta2_f,
                     float epsilon_f,
-                    bool multi_precision,
+                    bool multi_precision UNUSED,
                     DenseTensor* param_out,
                     DenseTensor* mom1_out,
                     DenseTensor* mom2_out,
@@ -38,10 +38,10 @@ void Conv3dCooGradCPUKernel(const CPUContext& dev_ctx,
                             const DenseTensor& rulebook,
                             const DenseTensor& counter,
                             const SparseCooTensor& out_grad,
-                            const std::vector<int>& paddings,
-                            const std::vector<int>& dilations,
-                            const std::vector<int>& strides,
-                            const int groups,
+                            const std::vector<int>& paddings UNUSED,
+                            const std::vector<int>& dilations UNUSED,
+                            const std::vector<int>& strides UNUSED,
+                            const int groups UNUSED,
                             const bool subm,
                             const std::string& key,
                             SparseCooTensor* x_grad,
@@ -34,7 +34,7 @@ void Conv3dCooCPUKernel(const CPUContext& dev_ctx,
                         const std::vector<int>& paddings,
                         const std::vector<int>& dilations,
                         const std::vector<int>& strides,
-                        const int groups,
+                        const int groups UNUSED,
                         const bool subm,
                         const std::string& key,
                         SparseCooTensor* out,