未验证 提交 8ed01e8d 编写于 作者: G Galaxy1458 提交者: GitHub

remove some [-Wunused-parameter] warning (#53687)

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop
上级 ca2ea162
...@@ -32,7 +32,7 @@ struct UnrollFillConstant { ...@@ -32,7 +32,7 @@ struct UnrollFillConstant {
template <size_t kStart, size_t kEnd> template <size_t kStart, size_t kEnd>
struct UnrollFillConstant<kStart, kEnd, true> { struct UnrollFillConstant<kStart, kEnd, true> {
template <typename T> template <typename T>
HOSTDEVICE inline static void Run(T *data, T val) {} HOSTDEVICE inline static void Run(T *data UNUSED, T val UNUSED) {}
}; };
template <size_t kStart, size_t kEnd, bool kStop> template <size_t kStart, size_t kEnd, bool kStop>
......
...@@ -31,7 +31,7 @@ namespace phi { ...@@ -31,7 +31,7 @@ namespace phi {
template <typename T, bool IsFoundInfOnCPU> template <typename T, bool IsFoundInfOnCPU>
class UpdateLossScalingFunctor<phi::CPUContext, T, IsFoundInfOnCPU> { class UpdateLossScalingFunctor<phi::CPUContext, T, IsFoundInfOnCPU> {
public: public:
void operator()(const phi::CPUContext& ctx, void operator()(const phi::CPUContext& ctx UNUSED,
const bool* found_inf_data, const bool* found_inf_data,
const T* pre_loss_scaling_data, const T* pre_loss_scaling_data,
const int* good_in_data, const int* good_in_data,
......
...@@ -55,7 +55,7 @@ void ArgsortGradKernel(const Context& dev_ctx, ...@@ -55,7 +55,7 @@ void ArgsortGradKernel(const Context& dev_ctx,
const DenseTensor& input, const DenseTensor& input,
const DenseTensor& out_grad, const DenseTensor& out_grad,
int axis, int axis,
bool descending, bool descending UNUSED,
DenseTensor* in_grad) { DenseTensor* in_grad) {
auto in_dims = indices.dims(); auto in_dims = indices.dims();
auto rank = input.dims().size(); auto rank = input.dims().size();
......
...@@ -122,10 +122,10 @@ struct SameDimsDivideFunctor< ...@@ -122,10 +122,10 @@ struct SameDimsDivideFunctor<
DevCtx, DevCtx,
T, T,
typename std::enable_if<!std::is_floating_point<T>::value>::type> { typename std::enable_if<!std::is_floating_point<T>::value>::type> {
void operator()(const DevCtx& dev_ctx, void operator()(const DevCtx& dev_ctx UNUSED,
const DenseTensor& x, const DenseTensor& x UNUSED,
const DenseTensor& y, const DenseTensor& y UNUSED,
DenseTensor* z) { DenseTensor* z UNUSED) {
phi::errors::InvalidArgument( phi::errors::InvalidArgument(
"If use SameDimsDivideFunctor, template args(T) must be floating " "If use SameDimsDivideFunctor, template args(T) must be floating "
"point. "); "point. ");
......
...@@ -31,7 +31,8 @@ void HSigmoidLossGradKernelImpl(const Context& ctx, ...@@ -31,7 +31,8 @@ void HSigmoidLossGradKernelImpl(const Context& ctx,
const DenseTensor& label, const DenseTensor& label,
const paddle::optional<DenseTensor>& path, const paddle::optional<DenseTensor>& path,
const paddle::optional<DenseTensor>& code, const paddle::optional<DenseTensor>& code,
const paddle::optional<DenseTensor>& bias, const paddle::optional<DenseTensor>& bias
UNUSED,
const DenseTensor& pre_out, const DenseTensor& pre_out,
const DenseTensor& out_grad, const DenseTensor& out_grad,
int num_classes, int num_classes,
......
...@@ -69,7 +69,7 @@ void IndexSampleGradInner(const Context& context, ...@@ -69,7 +69,7 @@ void IndexSampleGradInner(const Context& context,
template <typename T, typename Context> template <typename T, typename Context>
void IndexSampleGradKernel(const Context& ctx, void IndexSampleGradKernel(const Context& ctx,
const DenseTensor& x, const DenseTensor& x UNUSED,
const DenseTensor& index, const DenseTensor& index,
const DenseTensor& out_grad, const DenseTensor& out_grad,
DenseTensor* x_grad) { DenseTensor* x_grad) {
......
...@@ -26,7 +26,7 @@ namespace phi { ...@@ -26,7 +26,7 @@ namespace phi {
template <typename Context, typename T, class Enable = void> template <typename Context, typename T, class Enable = void>
struct IndexSelectAdd { struct IndexSelectAdd {
void operator()(const Context& ctx, void operator()(const Context& ctx UNUSED,
int slice_size, int slice_size,
const T* src_pointer, const T* src_pointer,
const T* p_pointer, const T* p_pointer,
......
...@@ -49,7 +49,7 @@ void KthvalueGradKernel(const Context& dev_ctx, ...@@ -49,7 +49,7 @@ void KthvalueGradKernel(const Context& dev_ctx,
const DenseTensor& x, const DenseTensor& x,
const DenseTensor& indices, const DenseTensor& indices,
const DenseTensor& d_out, const DenseTensor& d_out,
int k, int k UNUSED,
int axis, int axis,
bool keepdim, bool keepdim,
DenseTensor* d_x) { DenseTensor* d_x) {
......
...@@ -21,7 +21,7 @@ namespace phi { ...@@ -21,7 +21,7 @@ namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void MaskedSelectGradKernel(const Context& dev_ctx, void MaskedSelectGradKernel(const Context& dev_ctx,
const DenseTensor& x, const DenseTensor& x UNUSED,
const DenseTensor& mask, const DenseTensor& mask,
const DenseTensor& out_grad, const DenseTensor& out_grad,
DenseTensor* x_grad) { DenseTensor* x_grad) {
......
...@@ -26,7 +26,7 @@ void CalcMedianGradKernel(const Context& dev_ctx, ...@@ -26,7 +26,7 @@ void CalcMedianGradKernel(const Context& dev_ctx,
const DenseTensor& x, const DenseTensor& x,
const DenseTensor& median_index, const DenseTensor& median_index,
const DenseTensor& out_grad, const DenseTensor& out_grad,
const IntArray& axes, const IntArray& axes UNUSED,
DenseTensor* x_grad, DenseTensor* x_grad,
T* x_grad_ptr) { T* x_grad_ptr) {
phi::funcs::SetConstant<Context, T> set_zero; phi::funcs::SetConstant<Context, T> set_zero;
...@@ -83,7 +83,7 @@ void NanmedianGradKernel(const Context& dev_ctx, ...@@ -83,7 +83,7 @@ void NanmedianGradKernel(const Context& dev_ctx,
const DenseTensor& median_index, const DenseTensor& median_index,
const DenseTensor& out_grad, const DenseTensor& out_grad,
const IntArray& axes, const IntArray& axes,
bool keep_dim, bool keep_dim UNUSED,
DenseTensor* x_grad) { DenseTensor* x_grad) {
BaseMedianGradKernel<T, Context>( BaseMedianGradKernel<T, Context>(
dev_ctx, input, median_index, out_grad, axes, x_grad); dev_ctx, input, median_index, out_grad, axes, x_grad);
......
...@@ -192,7 +192,7 @@ template <typename T, typename Context> ...@@ -192,7 +192,7 @@ template <typename T, typename Context>
void NanmedianKernel(const Context& dev_ctx, void NanmedianKernel(const Context& dev_ctx,
const DenseTensor& x, const DenseTensor& x,
const IntArray& axes, const IntArray& axes,
bool keepdim, bool keepdim UNUSED,
DenseTensor* out, DenseTensor* out,
DenseTensor* median_index) { DenseTensor* median_index) {
BaseMedianKernel<T, Context>(dev_ctx, x, axes, out, median_index, true); BaseMedianKernel<T, Context>(dev_ctx, x, axes, out, median_index, true);
......
...@@ -21,7 +21,7 @@ namespace phi { ...@@ -21,7 +21,7 @@ namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void RandpermKernel(const Context& dev_ctx, void RandpermKernel(const Context& dev_ctx,
int n, int n,
DataType dtype, DataType dtype UNUSED,
DenseTensor* out) { DenseTensor* out) {
T* out_data = dev_ctx.template Alloc<T>(out); T* out_data = dev_ctx.template Alloc<T>(out);
int seed = 0; int seed = 0;
......
...@@ -26,7 +26,7 @@ namespace phi { ...@@ -26,7 +26,7 @@ namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void RepeatInterleaveWithTensorIndexGradKernel( void RepeatInterleaveWithTensorIndexGradKernel(
const Context& ctx, const Context& ctx,
const DenseTensor& x, const DenseTensor& x UNUSED,
const DenseTensor& repeats_tensor, const DenseTensor& repeats_tensor,
const DenseTensor& out_grad, const DenseTensor& out_grad,
int dim, int dim,
...@@ -74,7 +74,7 @@ void RepeatInterleaveWithTensorIndexGradKernel( ...@@ -74,7 +74,7 @@ void RepeatInterleaveWithTensorIndexGradKernel(
template <typename T, typename Context> template <typename T, typename Context>
void RepeatInterleaveGradKernel(const Context& ctx, void RepeatInterleaveGradKernel(const Context& ctx,
const DenseTensor& x, const DenseTensor& x UNUSED,
const DenseTensor& out_grad, const DenseTensor& out_grad,
int repeats, int repeats,
int dim, int dim,
......
...@@ -164,19 +164,19 @@ template <typename T, template <typename> class EigenActivationBackwardFunctor> ...@@ -164,19 +164,19 @@ template <typename T, template <typename> class EigenActivationBackwardFunctor>
struct SimpleRNNGradCell : GradCell<T> { struct SimpleRNNGradCell : GradCell<T> {
void operator()(const CPUContext& dev_ctx, void operator()(const CPUContext& dev_ctx,
DenseTensor* gate_tensor, DenseTensor* gate_tensor,
DenseTensor* state_tensor, DenseTensor* state_tensor UNUSED,
DenseTensor* act_state_tensor, DenseTensor* act_state_tensor UNUSED,
DenseTensor* hidden_tensor, DenseTensor* hidden_tensor,
const DenseTensor* weight_hh, const DenseTensor* weight_hh,
DenseTensor* pre_hidden, DenseTensor* pre_hidden,
DenseTensor* pre_state, DenseTensor* pre_state UNUSED,
DenseTensor* grad_hidden, DenseTensor* grad_hidden,
DenseTensor* grad_state, DenseTensor* grad_state UNUSED,
DenseTensor* grad_gate, DenseTensor* grad_gate,
DenseTensor* grad_weight_hh, DenseTensor* grad_weight_hh,
DenseTensor* grad_pre_hidden, DenseTensor* grad_pre_hidden,
DenseTensor* grad_pre_state, DenseTensor* grad_pre_state UNUSED,
DenseTensor* grad_bias_hh, DenseTensor* grad_bias_hh UNUSED,
const DenseTensor& mask_tensor, const DenseTensor& mask_tensor,
bool has_sequence_length) const override { bool has_sequence_length) const override {
DenseTensor grad_pre_hidden_bak; DenseTensor grad_pre_hidden_bak;
......
...@@ -57,13 +57,13 @@ struct SimpleRNNCell : Cell<T> { ...@@ -57,13 +57,13 @@ struct SimpleRNNCell : Cell<T> {
DenseTensor* input, DenseTensor* input,
const DenseTensor* weight_hh, const DenseTensor* weight_hh,
const DenseTensor* init_h, const DenseTensor* init_h,
const DenseTensor* init_c, const DenseTensor* init_c UNUSED,
DenseTensor* last_h, DenseTensor* last_h UNUSED,
DenseTensor* last_c, DenseTensor* last_c UNUSED,
DenseTensor* last_c_act, DenseTensor* last_c_act UNUSED,
DenseTensor* output, DenseTensor* output UNUSED,
const DenseTensor* bias_hh, const DenseTensor* bias_hh UNUSED,
DenseTensor* weight_hh_gru) const override { DenseTensor* weight_hh_gru UNUSED) const override {
auto blas = phi::funcs::GetBlas<CPUContext, T>(*dev_ctx); auto blas = phi::funcs::GetBlas<CPUContext, T>(*dev_ctx);
auto mat_dim_a = auto mat_dim_a =
phi::funcs::CreateMatrixDescriptor(init_h->dims(), 0, false); phi::funcs::CreateMatrixDescriptor(init_h->dims(), 0, false);
......
...@@ -25,9 +25,9 @@ namespace phi { ...@@ -25,9 +25,9 @@ namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void ScatterGradKernel(const Context &ctx, void ScatterGradKernel(const Context &ctx,
const DenseTensor &index, const DenseTensor &index,
const DenseTensor &updates, const DenseTensor &updates UNUSED,
const DenseTensor &out_grad, const DenseTensor &out_grad,
bool overwrite, bool overwrite UNUSED,
DenseTensor *x_grad, DenseTensor *x_grad,
DenseTensor *updates_grad) { DenseTensor *updates_grad) {
const auto &index_type = index.dtype(); const auto &index_type = index.dtype();
......
...@@ -56,8 +56,8 @@ void TopkGradKernel(const Context& dev_ctx, ...@@ -56,8 +56,8 @@ void TopkGradKernel(const Context& dev_ctx,
const DenseTensor& out_grad, const DenseTensor& out_grad,
const Scalar& k_scalar, const Scalar& k_scalar,
int axis, int axis,
bool largest, bool largest UNUSED,
bool sorted, bool sorted UNUSED,
DenseTensor* x_grad) { DenseTensor* x_grad) {
const auto& in_dims = x.dims(); const auto& in_dims = x.dims();
const auto& out_dims = indices.dims(); const auto& out_dims = indices.dims();
......
...@@ -21,7 +21,7 @@ namespace phi { ...@@ -21,7 +21,7 @@ namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void TruncGradKernel(const Context& dev_ctx, void TruncGradKernel(const Context& dev_ctx,
const DenseTensor& out_grad, const DenseTensor& out_grad UNUSED,
DenseTensor* in_grad) { DenseTensor* in_grad) {
T* dx_data = dev_ctx.template Alloc<T>(in_grad); T* dx_data = dev_ctx.template Alloc<T>(in_grad);
......
...@@ -21,8 +21,8 @@ namespace phi { ...@@ -21,8 +21,8 @@ namespace phi {
template <typename T, typename Context> template <typename T, typename Context>
void WhereGradKernel(const Context& ctx, void WhereGradKernel(const Context& ctx,
const DenseTensor& condition, const DenseTensor& condition,
const DenseTensor& x, const DenseTensor& x UNUSED,
const DenseTensor& y, const DenseTensor& y UNUSED,
const DenseTensor& out_grad, const DenseTensor& out_grad,
DenseTensor* x_grad, DenseTensor* x_grad,
DenseTensor* y_grad) { DenseTensor* y_grad) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册