Unverified · Commit 0fac3281 authored by Galaxy1458, committed by GitHub

remove some [-Wunused-parameter] warning (#53365)

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop
Parent 2e1ac529
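
Every hunk below applies the same fix: annotate a deliberately ignored parameter with the `UNUSED` macro from `paddle/phi/core/macros.h`, so `-Wunused-parameter` stays quiet without dropping the parameter name from the signature. A minimal sketch of the pattern, assuming the macro wraps the GCC/Clang `unused` attribute (the exact definition in macros.h may differ; `PickWidth` is a hypothetical example, not Paddle code):

// Sketch only: Paddle's real macro lives in paddle/phi/core/macros.h.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// Keeping the name documents the interface; the attribute tells
// -Wunused-parameter that ignoring it is intentional.
int PickWidth(int row UNUSED, int width) { return width; }

int main() { return PickWidth(0, 8) == 8 ? 0 : 1; }

Compiled with `g++ -Wall -Wextra` (where `-Wextra` implies `-Wunused-parameter`), the annotated signature produces no warning.
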
@@ -486,7 +486,7 @@ class ProcessGroup {
virtual std::shared_ptr<ProcessGroup::Task> Reduce(
std::vector<phi::DenseTensor>&, // NOLINT
std::vector<phi::DenseTensor>&, // NOLINT
-const ReduceOptions& opts) {
+const ReduceOptions& opts UNUSED) {
PADDLE_THROW(phi::errors::InvalidArgument(
"ProcessGroup%s does not support reduce", GetBackendName()));
}
@@ -30,7 +30,7 @@
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/phi/core/macros.h"
namespace brpc {
class Channel;
class Controller;
@@ -63,7 +63,7 @@ class DownpourPsClientService : public PsService {
PsResponseMessage *response,
::google::protobuf::Closure *done);
-virtual void FLService(::google::protobuf::RpcController *controller,
+virtual void FLService(::google::protobuf::RpcController *controller UNUSED,
const CoordinatorReqMessage *request,
CoordinatorResMessage *response,
::google::protobuf::Closure *done) {
@@ -457,8 +457,8 @@ inline void RunProgramAPI(
}
inline void RunProgramGradAPI(
-const std::vector<paddle::Tensor> &x,
-const std::vector<paddle::Tensor> &params,
+const std::vector<paddle::Tensor> &x UNUSED,
+const std::vector<paddle::Tensor> &params UNUSED,
const std::vector<paddle::Tensor> &out_grad,
const std::vector<paddle::framework::Scope *> &step_scope, // NOLINT
const paddle::framework::AttributeMap &attrs,
@@ -610,8 +610,8 @@ class GradNodeRunProgram : public egr::GradNodeBase {
egr::kSlotSmallVectorSize>
operator()(paddle::small_vector<std::vector<paddle::Tensor>,
egr::kSlotSmallVectorSize> &grads, // NOLINT
-bool create_graph,
-bool is_new_grad) override {
+bool create_graph UNUSED,
+bool is_new_grad UNUSED) override {
VLOG(3) << "Running Eager Backward Node: GradNodeRunProgram";
paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize>
hooked_grads = GradNodeRunProgram::ApplyGradientHooks(grads);
@@ -58,7 +58,7 @@ class Dataset {
const uint16_t start_sample_layer UNUSED,
const bool with_hierachy UNUSED,
const uint16_t seed_ UNUSED,
-const uint16_t sample_slot) {}
+const uint16_t sample_slot UNUSED) {}
// set file list
virtual void SetFileList(const std::vector<std::string>& filelist) = 0;
// set readers' num
@@ -44,7 +44,7 @@ class FCMKLDNNHandler
const phi::DenseTensor* x,
const phi::DenseTensor* weights,
const phi::DenseTensor* bias,
-phi::DenseTensor* out,
+phi::DenseTensor* out UNUSED,
const int in_num_col_dims,
dnnl::engine onednn_engine,
platform::Place cpu_place)
@@ -30,7 +30,7 @@ void EmptyKernel(const Context& dev_ctx,
template <typename T, typename Context>
void EmptyLikeKernel(const Context& dev_ctx,
-const DenseTensor& x,
+const DenseTensor& x UNUSED,
DataType dtype UNUSED,
DenseTensor* out) {
dev_ctx.template Alloc<T>(out);
@@ -25,8 +25,8 @@ namespace phi {
template <typename T, typename Context>
void FlattenInferKernel(const Context& dev_ctx,
const DenseTensor& x,
-int start_axis,
-int stop_axis,
+int start_axis UNUSED,
+int stop_axis UNUSED,
DenseTensor* out) {
dev_ctx.Alloc(out, x.dtype());
auto out_dims = out->dims();
@@ -43,7 +43,7 @@ void FlattenKernel(const Context& dev_ctx,
int start_axis,
int stop_axis,
DenseTensor* out,
-DenseTensor* xshape) {
+DenseTensor* xshape UNUSED) {
FlattenInferKernel<T, Context>(dev_ctx, x, start_axis, stop_axis, out);
}
@@ -1198,7 +1198,7 @@ struct TanhGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
-void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
+void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
dx.device(d) = dout * (static_cast<T>(1) - out * out);
}
@@ -1794,7 +1794,7 @@ struct SigmoidGradFunctor : public BaseActivationFunctor<T> {
typename Out,
typename dOut,
typename dX>
-void operator()(Device d, X x, Out out, dOut dout, dX dx) const {
+void operator()(Device d, X x UNUSED, Out out, dOut dout, dX dx) const {
dx.device(d) = dout * out * (static_cast<T>(1) - out);
}
@@ -96,12 +96,12 @@ struct BinaryCompoundGradDyFunctor {
unary_fun_(unary_fun),
d_unary_fun_(d_unary_fun) {}
-inline HOSTDEVICE T Recompute(T x, T y, T out, T dout) {
+inline HOSTDEVICE T Recompute(T x, T y, T out UNUSED, T dout) {
return dout * d_binary_fun_.Dy(x, unary_fun_(y)) * d_unary_fun_.UseX(y);
}
inline HOSTDEVICE T
-UseIntermediateOut(T x, T y, T intermediate_out, T out, T dout) {
+UseIntermediateOut(T x, T y, T intermediate_out, T out UNUSED, T dout) {
if (InPlace) {
return dout * d_binary_fun_.Dy(x, intermediate_out) *
d_unary_fun_.UseOut(intermediate_out);
@@ -111,7 +111,9 @@ struct BinaryCompoundGradDyFunctor {
}
}
-inline HOSTDEVICE T GetIntermediateOut(T x, T y) { return unary_fun_(y); }
+inline HOSTDEVICE T GetIntermediateOut(T x UNUSED, T y) {
+  return unary_fun_(y);
+}
private:
DBinaryFun d_binary_fun_;
@@ -20,7 +20,7 @@ limitations under the License. */
#include "paddle/phi/backends/cpu/cpu_info.h"
#include "paddle/phi/core/hostdevice.h"
#include "paddle/phi/core/macros.h"
namespace phi {
namespace funcs {
namespace detail {
@@ -104,7 +104,7 @@ DEVICE T TanhV2(const T a) {
namespace backward {
template <typename T>
-DEVICE T Identity(const T a, const T b) {
+DEVICE T Identity(const T a, const T b UNUSED) {
return a;
}
@@ -21,8 +21,8 @@ limitations under the License. */
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/macros.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace phi {
namespace funcs {
@@ -34,7 +34,7 @@ namespace funcs {
* return: output tensor
*/
template <typename T, typename IndexT = int>
-void CPUGather(const phi::CPUContext& ctx,
+void CPUGather(const phi::CPUContext& ctx UNUSED,
const DenseTensor& src,
const DenseTensor& index,
DenseTensor* output) {
@@ -95,7 +95,7 @@ void CPUGather(const phi::CPUContext& ctx,
}
template <typename T, typename IndexT = int>
-void CPUGatherNd(const phi::CPUContext& ctx,
+void CPUGatherNd(const phi::CPUContext& ctx UNUSED,
const DenseTensor& input,
const DenseTensor& index,
DenseTensor* output) {
@@ -74,7 +74,7 @@ inline typename std::enable_if<
!std::is_same<typename KernelTuple::data_type, float>::value ||
!std::is_same<PlaceType, phi::CPUPlace>::value,
const Kernel*>::type
-GetJitCode(const typename KernelTuple::attr_type& attr) {
+GetJitCode(const typename KernelTuple::attr_type& attr UNUSED) {
return nullptr;
}
@@ -153,7 +153,7 @@ struct ProdGradFunctor {
DX* dx,
DY* dy,
const Dim& dim,
-int size) {
+int size UNUSED) {
dx->device(place) = dy->broadcast(dim) * y->broadcast(dim) * x->inverse();
}
};
@@ -47,7 +47,7 @@ elementwise_inner_add(const phi::CPUContext& ctx,
template <typename T, typename IndexT = int>
typename std::enable_if<!std::is_floating_point<T>::value>::type
-elementwise_inner_add(const phi::CPUContext& ctx,
+elementwise_inner_add(const phi::CPUContext& ctx UNUSED,
const T* src_pointer,
T* dst_pointer,
size_t src_index,
@@ -12,9 +12,9 @@ limitations under the License. */
#pragma once
#include <vector>
#include "paddle/phi/kernels/funcs/detail/strided_memcpy.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/macros.h"
#include "paddle/phi/kernels/funcs/detail/strided_memcpy.h"
namespace phi {
class CPUContext;
@@ -65,7 +65,7 @@ inline void CopyWithContext(const Context& ctx,
}
template <>
-inline void CopyWithContext<phi::CPUContext>(const phi::CPUContext& ctx,
+inline void CopyWithContext<phi::CPUContext>(const phi::CPUContext& ctx UNUSED,
const Place& dst_place,
void* dst,
const Place& src_place,
@@ -23,7 +23,7 @@ template <typename T, typename Context>
void SqueezeGradKernel(const Context& dev_ctx,
const DenseTensor& xshape,
const DenseTensor& dout,
-const IntArray& axes,
+const IntArray& axes UNUSED,
DenseTensor* dx) {
auto xshape_dims = xshape.dims();
auto x_dims = phi::slice_ddim(xshape_dims, 1, xshape_dims.size());
@@ -23,7 +23,7 @@ namespace phi {
template <typename T, typename Context>
void SqueezeInferKernel(const Context& dev_ctx,
const DenseTensor& x,
-const IntArray& axes,
+const IntArray& axes UNUSED,
DenseTensor* out) {
auto out_dims = out->dims();
dev_ctx.template Alloc<T>(out);
@@ -39,7 +39,7 @@ void SqueezeKernel(const Context& dev_ctx,
const DenseTensor& x,
const IntArray& axes,
DenseTensor* out,
-DenseTensor* xshape) {
+DenseTensor* xshape UNUSED) {
SqueezeInferKernel<T, Context>(dev_ctx, x, axes, out);
}
@@ -44,7 +44,7 @@ void UnsqueezeKernel(const Context& dev_ctx,
const DenseTensor& x,
const IntArray& axes,
DenseTensor* out,
-DenseTensor* xshape) {
+DenseTensor* xshape UNUSED) {
UnsqueezeInferKernel<T, Context>(dev_ctx, x, axes, out);
}
} // namespace phi
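
For comparison, C++17 standardized the same intent as an attribute. A hedged alternative sketch (again using the hypothetical `PickWidth`), not what this commit uses:

// [[maybe_unused]] is the portable C++17 spelling; this commit relies on
// the UNUSED macro instead, which also covers pre-C++17 toolchains.
int PickWidth([[maybe_unused]] int row, int width) { return width; }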