Unverified commit 8ed01e8d authored by Galaxy1458, committed by GitHub

remove some [-Wunused-parameter] warnings (#53687)

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop
Parent ca2ea162
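For context, the `UNUSED` annotation applied throughout this diff is Paddle's macro for the compiler's unused attribute. The sketch below is a minimal, hypothetical reconstruction of that pattern (the guards shown and the stub function `ArgsortGradStub` are assumptions, not Paddle's actual headers or kernel API); it illustrates why the annotation silences `-Wunused-parameter` without changing any function signature:

```cpp
#include <cstdio>

// A minimal sketch, assuming a GCC/Clang-style attribute; Paddle's actual
// UNUSED macro lives in its own headers and its guards may differ.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// Hypothetical stand-in for the kernels in this diff: 'descending' is part of
// the caller-facing signature but never read in the body, so the attribute
// suppresses -Wunused-parameter while the function type stays identical.
void ArgsortGradStub(int axis, bool descending UNUSED) {
  std::printf("axis = %d\n", axis);  // only 'axis' is consumed here
}

int main() {
  ArgsortGradStub(1, /*descending=*/true);  // warning-free under -Wall -Wextra
  return 0;
}
```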
@@ -32,7 +32,7 @@ struct UnrollFillConstant {
 template <size_t kStart, size_t kEnd>
 struct UnrollFillConstant<kStart, kEnd, true> {
   template <typename T>
-  HOSTDEVICE inline static void Run(T *data, T val) {}
+  HOSTDEVICE inline static void Run(T *data UNUSED, T val UNUSED) {}
 };
 template <size_t kStart, size_t kEnd, bool kStop>
......
@@ -31,7 +31,7 @@ namespace phi {
 template <typename T, bool IsFoundInfOnCPU>
 class UpdateLossScalingFunctor<phi::CPUContext, T, IsFoundInfOnCPU> {
  public:
-  void operator()(const phi::CPUContext& ctx,
+  void operator()(const phi::CPUContext& ctx UNUSED,
                   const bool* found_inf_data,
                   const T* pre_loss_scaling_data,
                   const int* good_in_data,
......
@@ -55,7 +55,7 @@ void ArgsortGradKernel(const Context& dev_ctx,
                        const DenseTensor& input,
                        const DenseTensor& out_grad,
                        int axis,
-                       bool descending,
+                       bool descending UNUSED,
                        DenseTensor* in_grad) {
   auto in_dims = indices.dims();
   auto rank = input.dims().size();
......
@@ -122,10 +122,10 @@ struct SameDimsDivideFunctor<
     DevCtx,
     T,
     typename std::enable_if<!std::is_floating_point<T>::value>::type> {
-  void operator()(const DevCtx& dev_ctx,
-                  const DenseTensor& x,
-                  const DenseTensor& y,
-                  DenseTensor* z) {
+  void operator()(const DevCtx& dev_ctx UNUSED,
+                  const DenseTensor& x UNUSED,
+                  const DenseTensor& y UNUSED,
+                  DenseTensor* z UNUSED) {
     phi::errors::InvalidArgument(
         "If use SameDimsDivideFunctor, template args(T) must be floating "
         "point. ");
......
@@ -31,7 +31,8 @@ void HSigmoidLossGradKernelImpl(const Context& ctx,
                                 const DenseTensor& label,
                                 const paddle::optional<DenseTensor>& path,
                                 const paddle::optional<DenseTensor>& code,
-                                const paddle::optional<DenseTensor>& bias,
+                                const paddle::optional<DenseTensor>& bias
+                                    UNUSED,
                                 const DenseTensor& pre_out,
                                 const DenseTensor& out_grad,
                                 int num_classes,
......
@@ -69,7 +69,7 @@ void IndexSampleGradInner(const Context& context,
 template <typename T, typename Context>
 void IndexSampleGradKernel(const Context& ctx,
-                           const DenseTensor& x,
+                           const DenseTensor& x UNUSED,
                            const DenseTensor& index,
                            const DenseTensor& out_grad,
                            DenseTensor* x_grad) {
......
@@ -26,7 +26,7 @@ namespace phi {
 template <typename Context, typename T, class Enable = void>
 struct IndexSelectAdd {
-  void operator()(const Context& ctx,
+  void operator()(const Context& ctx UNUSED,
                   int slice_size,
                   const T* src_pointer,
                   const T* p_pointer,
......
@@ -49,7 +49,7 @@ void KthvalueGradKernel(const Context& dev_ctx,
                         const DenseTensor& x,
                         const DenseTensor& indices,
                         const DenseTensor& d_out,
-                        int k,
+                        int k UNUSED,
                         int axis,
                         bool keepdim,
                         DenseTensor* d_x) {
......
@@ -21,7 +21,7 @@ namespace phi {
 template <typename T, typename Context>
 void MaskedSelectGradKernel(const Context& dev_ctx,
-                            const DenseTensor& x,
+                            const DenseTensor& x UNUSED,
                             const DenseTensor& mask,
                             const DenseTensor& out_grad,
                             DenseTensor* x_grad) {
......
@@ -26,7 +26,7 @@ void CalcMedianGradKernel(const Context& dev_ctx,
                           const DenseTensor& x,
                           const DenseTensor& median_index,
                           const DenseTensor& out_grad,
-                          const IntArray& axes,
+                          const IntArray& axes UNUSED,
                           DenseTensor* x_grad,
                           T* x_grad_ptr) {
   phi::funcs::SetConstant<Context, T> set_zero;
@@ -83,7 +83,7 @@ void NanmedianGradKernel(const Context& dev_ctx,
                          const DenseTensor& median_index,
                          const DenseTensor& out_grad,
                          const IntArray& axes,
-                         bool keep_dim,
+                         bool keep_dim UNUSED,
                          DenseTensor* x_grad) {
   BaseMedianGradKernel<T, Context>(
       dev_ctx, input, median_index, out_grad, axes, x_grad);
......
@@ -192,7 +192,7 @@ template <typename T, typename Context>
 void NanmedianKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      const IntArray& axes,
-                     bool keepdim,
+                     bool keepdim UNUSED,
                      DenseTensor* out,
                      DenseTensor* median_index) {
   BaseMedianKernel<T, Context>(dev_ctx, x, axes, out, median_index, true);
......
@@ -21,7 +21,7 @@ namespace phi {
 template <typename T, typename Context>
 void RandpermKernel(const Context& dev_ctx,
                     int n,
-                    DataType dtype,
+                    DataType dtype UNUSED,
                     DenseTensor* out) {
   T* out_data = dev_ctx.template Alloc<T>(out);
   int seed = 0;
......
@@ -26,7 +26,7 @@ namespace phi {
 template <typename T, typename Context>
 void RepeatInterleaveWithTensorIndexGradKernel(
     const Context& ctx,
-    const DenseTensor& x,
+    const DenseTensor& x UNUSED,
     const DenseTensor& repeats_tensor,
     const DenseTensor& out_grad,
     int dim,
@@ -74,7 +74,7 @@ void RepeatInterleaveWithTensorIndexGradKernel(
 template <typename T, typename Context>
 void RepeatInterleaveGradKernel(const Context& ctx,
-                                const DenseTensor& x,
+                                const DenseTensor& x UNUSED,
                                 const DenseTensor& out_grad,
                                 int repeats,
                                 int dim,
......
@@ -164,19 +164,19 @@ template <typename T, template <typename> class EigenActivationBackwardFunctor>
 struct SimpleRNNGradCell : GradCell<T> {
   void operator()(const CPUContext& dev_ctx,
                   DenseTensor* gate_tensor,
-                  DenseTensor* state_tensor,
-                  DenseTensor* act_state_tensor,
+                  DenseTensor* state_tensor UNUSED,
+                  DenseTensor* act_state_tensor UNUSED,
                   DenseTensor* hidden_tensor,
                   const DenseTensor* weight_hh,
                   DenseTensor* pre_hidden,
-                  DenseTensor* pre_state,
+                  DenseTensor* pre_state UNUSED,
                   DenseTensor* grad_hidden,
-                  DenseTensor* grad_state,
+                  DenseTensor* grad_state UNUSED,
                   DenseTensor* grad_gate,
                   DenseTensor* grad_weight_hh,
                   DenseTensor* grad_pre_hidden,
-                  DenseTensor* grad_pre_state,
-                  DenseTensor* grad_bias_hh,
+                  DenseTensor* grad_pre_state UNUSED,
+                  DenseTensor* grad_bias_hh UNUSED,
                   const DenseTensor& mask_tensor,
                   bool has_sequence_length) const override {
     DenseTensor grad_pre_hidden_bak;
......
@@ -57,13 +57,13 @@ struct SimpleRNNCell : Cell<T> {
                   DenseTensor* input,
                   const DenseTensor* weight_hh,
                   const DenseTensor* init_h,
-                  const DenseTensor* init_c,
-                  DenseTensor* last_h,
-                  DenseTensor* last_c,
-                  DenseTensor* last_c_act,
-                  DenseTensor* output,
-                  const DenseTensor* bias_hh,
-                  DenseTensor* weight_hh_gru) const override {
+                  const DenseTensor* init_c UNUSED,
+                  DenseTensor* last_h UNUSED,
+                  DenseTensor* last_c UNUSED,
+                  DenseTensor* last_c_act UNUSED,
+                  DenseTensor* output UNUSED,
+                  const DenseTensor* bias_hh UNUSED,
+                  DenseTensor* weight_hh_gru UNUSED) const override {
     auto blas = phi::funcs::GetBlas<CPUContext, T>(*dev_ctx);
     auto mat_dim_a =
         phi::funcs::CreateMatrixDescriptor(init_h->dims(), 0, false);
......
@@ -25,9 +25,9 @@ namespace phi {
 template <typename T, typename Context>
 void ScatterGradKernel(const Context &ctx,
                        const DenseTensor &index,
-                       const DenseTensor &updates,
+                       const DenseTensor &updates UNUSED,
                        const DenseTensor &out_grad,
-                       bool overwrite,
+                       bool overwrite UNUSED,
                        DenseTensor *x_grad,
                        DenseTensor *updates_grad) {
   const auto &index_type = index.dtype();
......
@@ -56,8 +56,8 @@ void TopkGradKernel(const Context& dev_ctx,
                     const DenseTensor& out_grad,
                     const Scalar& k_scalar,
                     int axis,
-                    bool largest,
-                    bool sorted,
+                    bool largest UNUSED,
+                    bool sorted UNUSED,
                     DenseTensor* x_grad) {
   const auto& in_dims = x.dims();
   const auto& out_dims = indices.dims();
......
@@ -21,7 +21,7 @@ namespace phi {
 template <typename T, typename Context>
 void TruncGradKernel(const Context& dev_ctx,
-                     const DenseTensor& out_grad,
+                     const DenseTensor& out_grad UNUSED,
                      DenseTensor* in_grad) {
   T* dx_data = dev_ctx.template Alloc<T>(in_grad);
......
@@ -21,8 +21,8 @@ namespace phi {
 template <typename T, typename Context>
 void WhereGradKernel(const Context& ctx,
                      const DenseTensor& condition,
-                     const DenseTensor& x,
-                     const DenseTensor& y,
+                     const DenseTensor& x UNUSED,
+                     const DenseTensor& y UNUSED,
                      const DenseTensor& out_grad,
                      DenseTensor* x_grad,
                      DenseTensor* y_grad) {
......
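As a design note: since C++17 the standard `[[maybe_unused]]` attribute achieves the same effect portably, and the project-wide `UNUSED` macro is presumably kept for consistency with existing code and older toolchains. A hypothetical stand-alone analogue of the TruncGradKernel change using the standard attribute (the stub name and plain float buffers are illustrative, not Paddle's kernel API):

```cpp
#include <cstddef>

// Hypothetical sketch, not Paddle's API: [[maybe_unused]] plays the same
// role here as the UNUSED macro does in this diff.
void TruncGradStub([[maybe_unused]] const float* out_grad,
                   float* in_grad,
                   std::size_t n) {
  // trunc() has zero gradient almost everywhere, so out_grad is never read
  for (std::size_t i = 0; i < n; ++i) {
    in_grad[i] = 0.0f;
  }
}
```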