diff --git a/paddle/fluid/distributed/common/afs_warpper.h b/paddle/fluid/distributed/common/afs_warpper.h
index 44ba6485fafa1d8d126f1a25f3478256729e498b..516b35448fe516ec78f1f21d06a13002817cf19c 100644
--- a/paddle/fluid/distributed/common/afs_warpper.h
+++ b/paddle/fluid/distributed/common/afs_warpper.h
@@ -22,7 +22,7 @@
 
 #include "paddle/fluid/distributed/the_one_ps.pb.h"
 #include "paddle/fluid/string/string_helper.h"
-
+#include "paddle/phi/core/macros.h"
 namespace paddle {
 namespace distributed {
 struct FsDataConverter {
@@ -43,7 +43,7 @@ class FsReadChannel {
   virtual ~FsReadChannel() {}
   FsReadChannel(FsReadChannel&&) = delete;
   FsReadChannel(const FsReadChannel&) = delete;
-  int open(std::shared_ptr<FILE> fp, const FsChannelConfig& config) {
+  int open(std::shared_ptr<FILE> fp, const FsChannelConfig& config UNUSED) {
     _file = fp;
     return 0;
   }
@@ -83,7 +83,7 @@ class FsWriteChannel {
   FsWriteChannel(FsWriteChannel&&) = delete;
   FsWriteChannel(const FsWriteChannel&) = delete;
 
-  int open(std::shared_ptr<FILE> fp, const FsChannelConfig& config) {
+  int open(std::shared_ptr<FILE> fp, const FsChannelConfig& config UNUSED) {
     _file = fp;
 
     // the buffer has set in fs.cc
diff --git a/paddle/fluid/distributed/ps/service/env.h b/paddle/fluid/distributed/ps/service/env.h
old mode 100755
new mode 100644
index 8e97e2126c2882ba6202af556709c8c044c61016..aa230f86c9d1720ad4a73df1236ce44010420a4a
--- a/paddle/fluid/distributed/ps/service/env.h
+++ b/paddle/fluid/distributed/ps/service/env.h
@@ -26,6 +26,7 @@
 #include <vector>
 
 #include "gflags/gflags.h"
+#include "paddle/phi/core/macros.h"
 
 namespace paddle {
 namespace distributed {
@@ -115,19 +116,23 @@ class PSEnvironment {
   explicit PSEnvironment() {}  // NOLINT
   virtual ~PSEnvironment() {}
 
-  virtual int32_t SetPsServers(uint64_t *host_sign_list, int node_num) {
+  virtual int32_t SetPsServers(uint64_t *host_sign_list UNUSED,
+                               int node_num UNUSED) {
     return 0;
   }
   virtual int32_t SetPsServers(
-      const std::vector<std::string> *host_endpoint_list, int node_num) {
+      const std::vector<std::string> *host_endpoint_list UNUSED,
+      int node_num UNUSED) {
     return 0;
   }
 
-  virtual int32_t SetPsClients(uint64_t *host_sign_list, int node_num) {
+  virtual int32_t SetPsClients(uint64_t *host_sign_list UNUSED,
+                               int node_num UNUSED) {
     return 0;
   }
-  virtual int32_t SetPsClients(std::string *host_endpoint_list, int node_num) {
+  virtual int32_t SetPsClients(std::string *host_endpoint_list UNUSED,
+                               int node_num UNUSED) {
     return 0;
   }
diff --git a/paddle/fluid/distributed/ps/service/ps_client.h b/paddle/fluid/distributed/ps/service/ps_client.h
index b9aa37dd06f680248320246dabf3d76f27e5a8e6..4e3bb6c0774eced8c5c6c49e1671ec3a71ee6693 100644
--- a/paddle/fluid/distributed/ps/service/ps_client.h
+++ b/paddle/fluid/distributed/ps/service/ps_client.h
@@ -137,11 +137,11 @@ class PSClient {
                                          size_t num,
                                          bool is_training) = 0;
 
-  virtual std::future<int32_t> PullSparseParam(float **select_values,
-                                               size_t table_id,
-                                               const uint64_t *keys,
-                                               size_t num,
-                                               bool is_training) {
+  virtual std::future<int32_t> PullSparseParam(float **select_values UNUSED,
+                                               size_t table_id UNUSED,
+                                               const uint64_t *keys UNUSED,
+                                               size_t num UNUSED,
+                                               bool is_training UNUSED) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int32_t> fut = promise.get_future();
@@ -149,13 +149,14 @@ class PSClient {
     return fut;
   }
 
-  virtual ::std::future<int32_t> PullSparsePtr(int shard_id,
-                                               char **select_values,
-                                               size_t table_id,
-                                               const uint64_t *keys,
-                                               size_t num,
-                                               uint16_t pass_id,
-                                               const uint16_t &dim_id = 0) {
+  virtual ::std::future<int32_t> PullSparsePtr(
+      int shard_id UNUSED,
+      char **select_values UNUSED,
+      size_t table_id UNUSED,
+      const uint64_t *keys UNUSED,
+      size_t num UNUSED,
+      uint16_t pass_id UNUSED,
+      const uint16_t &dim_id UNUSED = 0) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int32_t> fut = promise.get_future();
@@ -164,9 +165,9 @@
   }
 
   virtual std::future<int32_t> PrintTableStat(uint32_t table_id) = 0;
-  virtual std::future<int32_t> SaveCacheTable(uint32_t table_id,
-                                              uint16_t pass_id,
-                                              size_t threshold) {
+  virtual std::future<int32_t> SaveCacheTable(uint32_t table_id UNUSED,
+                                              uint16_t pass_id UNUSED,
+                                              size_t threshold UNUSED) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int32_t> fut = promise.get_future();
@@ -201,9 +202,10 @@ class PSClient {
   virtual void FinalizeWorker() = 0;
 
   // client to client, message sending
-  virtual std::future<int32_t> SendClient2ClientMsg(int msg_type,
-                                                    int to_client_id,
-                                                    const std::string &msg) {
+  virtual std::future<int32_t> SendClient2ClientMsg(int msg_type UNUSED,
+                                                    int to_client_id UNUSED,
+                                                    const std::string &msg
+                                                        UNUSED) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int32_t> fut = promise.get_future();
@@ -272,11 +274,11 @@ class PSClient {
                                       size_t num) = 0;
 
   // for save cache
-  virtual std::future<int32_t> CacheShuffle(
-      uint32_t table_id,
-      const std::string &path,
-      const std::string &mode,
-      const std::string &cache_threshold) {
+  virtual std::future<int32_t> CacheShuffle(uint32_t table_id UNUSED,
+                                            const std::string &path UNUSED,
+                                            const std::string &mode UNUSED,
+                                            const std::string &cache_threshold
+                                                UNUSED) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int32_t> fut = promise.get_future();
@@ -285,10 +287,10 @@
   }
 
   virtual std::future<int32_t> CacheShuffleMultiTable(
-      std::vector<int> tables,
-      const std::string &path,
-      const std::string &mode,
-      const std::string &cache_threshold) {
+      std::vector<int> tables UNUSED,
+      const std::string &path UNUSED,
+      const std::string &mode UNUSED,
+      const std::string &cache_threshold UNUSED) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int32_t> fut = promise.get_future();
@@ -296,9 +298,9 @@
     return fut;
   }
 
-  virtual std::future<int32_t> SaveCache(uint32_t table_id,
-                                         const std::string &path,
-                                         const std::string &mode) {
+  virtual std::future<int32_t> SaveCache(uint32_t table_id UNUSED,
+                                         const std::string &path UNUSED,
+                                         const std::string &mode UNUSED) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int32_t> fut = promise.get_future();
@@ -306,9 +308,9 @@
     return fut;
  }
 
-  virtual std::future<int32_t> GetCacheThreshold(
-      uint32_t table_id,
-      double &cache_threshold) {  // NOLINT
+  virtual std::future<int32_t> GetCacheThreshold(uint32_t table_id UNUSED,
+                                                 double &cache_threshold
+                                                     UNUSED) {  // NOLINT
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int32_t> fut = promise.get_future();
@@ -333,7 +335,9 @@ class PSClient {
   }
   // add
   virtual std::shared_ptr<SparseShardValues> TakePassSparseReferedValues(
-      const size_t &table_id, const uint16_t &pass_id, const uint16_t &dim_id) {
+      const size_t &table_id UNUSED,
+      const uint16_t &pass_id UNUSED,
+      const uint16_t &dim_id UNUSED) {
     VLOG(0) << "Did not implement";
     return nullptr;
   }
diff --git a/paddle/fluid/distributed/ps/table/accessor.h b/paddle/fluid/distributed/ps/table/accessor.h
index 5ac0de018eeaf70c73cd48c8d01497ebdefb8ce0..b2971b28c4dc9b9720a38a43ebbe792fe541d9b8 100644
--- a/paddle/fluid/distributed/ps/table/accessor.h
+++ b/paddle/fluid/distributed/ps/table/accessor.h
@@ -91,8 +91,8 @@ class ValueAccessor {
 
   virtual AccessorInfo GetAccessorInfo() { return _accessor_info; }
 
-  virtual bool NeedExtendMF(float* value) { return false; }
-  virtual bool HasMF(size_t size) { return false; }
+  virtual bool NeedExtendMF(float* value UNUSED) { return false; }
+  virtual bool HasMF(size_t size UNUSED) { return false; }
   // converter for save
   virtual std::string GetConverter(int param) {
     auto itr = _data_coverter_map.find(param);
@@ -118,11 +118,11 @@ class ValueAccessor {
   // "param" marks the save stage, e.g. downpour's xbox vs. batch_model
   virtual bool Save(float* value, int param) = 0;
   // update delta_score and unseen_days after save
-  virtual void UpdateStatAfterSave(float* value, int param) {}
+  virtual void UpdateStatAfterSave(float* value UNUSED, int param UNUSED) {}
   // whether this value should be saved to SSD
   virtual bool SaveSSD(float* value) = 0;
   // whether to filter out the slot's feasign during warm start
-  virtual bool FilterSlot(float* value) { return false; }
+  virtual bool FilterSlot(float* value UNUSED) { return false; }
   //
   virtual bool SaveCache(float* value,
@@ -131,7 +131,9 @@ class ValueAccessor {
 
   // when keys do not exist, generate random values for them
   virtual int32_t Create(float** value, size_t num) = 0;
-  virtual bool CreateValue(int type, const float* value) { return true; }
+  virtual bool CreateValue(int type UNUSED, const float* value UNUSED) {
+    return true;
+  }
   // pick entries from values into select_values
   virtual int32_t Select(float** select_values,
                          const float** values,
@@ -159,22 +161,24 @@ class ValueAccessor {
     return data_convert;
   }
 
-  virtual int SetWeight(float** values,
-                        const float** update_values,
-                        size_t num) {
+  virtual int SetWeight(float** values UNUSED,
+                        const float** update_values UNUSED,
+                        size_t num UNUSED) {
     return 0;
   }
 
-  virtual bool SaveMemCache(float* value,
-                            int param,
-                            double global_cache_threshold,
-                            uint16_t pass_id) {
+  virtual bool SaveMemCache(float* value UNUSED,
+                            int param UNUSED,
+                            double global_cache_threshold UNUSED,
+                            uint16_t pass_id UNUSED) {
     return true;
   }
 
-  virtual void UpdatePassId(float* value, uint16_t pass_id) {}
+  virtual void UpdatePassId(float* value UNUSED, uint16_t pass_id UNUSED) {}
 
-  virtual float GetField(float* value, const std::string& name) { return 0.0; }
+  virtual float GetField(float* value UNUSED, const std::string& name UNUSED) {
+    return 0.0;
+  }
 
 #define DEFINE_GET_INDEX(class, field) \
   virtual int get_##field##_index() { return class ::field##_index(); }
diff --git a/paddle/fluid/distributed/ps/table/graph/graph_edge.h b/paddle/fluid/distributed/ps/table/graph/graph_edge.h
index 6b929af679e50f2c54aafe6a434c060a86e16c57..8a5c7c1ce10dbeace95dda54cf65e789d26bc332 100644
--- a/paddle/fluid/distributed/ps/table/graph/graph_edge.h
+++ b/paddle/fluid/distributed/ps/table/graph/graph_edge.h
@@ -16,6 +16,7 @@
 #include <cstring>
 #include <iostream>
 #include <vector>
+#include "paddle/phi/core/macros.h"
 
 namespace paddle {
 namespace distributed {
@@ -26,7 +27,7 @@ class GraphEdgeBlob {
   size_t size() { return id_arr.size(); }
   virtual void add_edge(int64_t id, float weight);
   int64_t get_id(int idx) { return id_arr[idx]; }
-  virtual float get_weight(int idx) { return 1; }
+  virtual float get_weight(int idx UNUSED) { return 1; }
   std::vector<int64_t>& export_id_array() { return id_arr; }
 
  protected:
diff --git a/paddle/fluid/distributed/ps/table/graph/graph_node.h b/paddle/fluid/distributed/ps/table/graph/graph_node.h
index e1b5143a5d876818455b16acad8bf287b2276c1e..ee12644019b42b6e48caf56b5ed6782f5499521b 100644
--- a/paddle/fluid/distributed/ps/table/graph/graph_node.h
+++ b/paddle/fluid/distributed/ps/table/graph/graph_node.h
@@ -38,31 +38,35 @@ class Node {
   int64_t get_py_id() { return (int64_t)id; }
   void set_id(uint64_t id) { this->id = id; }
 
-  virtual void build_edges(bool is_weighted) {}
-  virtual void build_sampler(std::string sample_type) {}
-  virtual void add_edge(uint64_t id, float weight) {}
+  virtual void build_edges(bool is_weighted UNUSED) {}
+  virtual void build_sampler(std::string sample_type UNUSED) {}
+  virtual void add_edge(uint64_t id UNUSED, float weight UNUSED) {}
   virtual std::vector<int> sample_k(
-      int k, const std::shared_ptr<std::mt19937_64> rng) {
+      int k UNUSED, const std::shared_ptr<std::mt19937_64> rng UNUSED) {
     return std::vector<int>();
   }
-  virtual uint64_t get_neighbor_id(int idx) { return 0; }
-  virtual float get_neighbor_weight(int idx) { return 1.; }
+  virtual uint64_t get_neighbor_id(int idx UNUSED) { return 0; }
+  virtual float get_neighbor_weight(int idx UNUSED) { return 1.; }
 
   virtual int get_size(bool need_feature);
   virtual void to_buffer(char *buffer, bool need_feature);
   virtual void recover_from_buffer(char *buffer);
-  virtual std::string get_feature(int idx) { return std::string(""); }
-  virtual int get_feature_ids(std::vector<uint64_t> *res) const { return 0; }
-  virtual int get_feature_ids(int slot_idx, std::vector<uint64_t> *res) const {
+  virtual std::string get_feature(int idx UNUSED) { return std::string(""); }
+  virtual int get_feature_ids(std::vector<uint64_t> *res UNUSED) const {
     return 0;
   }
-  virtual int get_feature_ids(int slot_idx,
-                              std::vector<uint64_t> &feature_id,  // NOLINT
-                              std::vector<uint8_t> &slot_id) const {  // NOLINT
+  virtual int get_feature_ids(int slot_idx UNUSED,
+                              std::vector<uint64_t> *res UNUSED) const {
+    return 0;
+  }
+  virtual int get_feature_ids(
+      int slot_idx UNUSED,
+      std::vector<uint64_t> &feature_id UNUSED,  // NOLINT
+      std::vector<uint8_t> &slot_id UNUSED) const {  // NOLINT
     return 0;
   }
-  virtual void set_feature(int idx, const std::string &str) {}
-  virtual void set_feature_size(int size) {}
+  virtual void set_feature(int idx UNUSED, const std::string &str UNUSED) {}
+  virtual void set_feature_size(int size UNUSED) {}
   virtual void shrink_to_fit() {}
   virtual int get_feature_size() { return 0; }
   virtual size_t get_neighbor_size() { return 0; }
diff --git a/paddle/fluid/eager/eager_tensor.h b/paddle/fluid/eager/eager_tensor.h
index ae9030118a97bc2af674df28d6032258fc2c43ca..28431b79eeec6339761336df6340600cbf97e188 100644
--- a/paddle/fluid/eager/eager_tensor.h
+++ b/paddle/fluid/eager/eager_tensor.h
@@ -20,6 +20,7 @@
 // Phi deps
 #include "paddle/phi/api/include/tensor.h"
 #include "paddle/phi/core/compat/convert_utils.h"
+#include "paddle/phi/core/macros.h"
 
 namespace egr {
 
@@ -133,10 +134,10 @@ class VariableCompatTensor
 
   bool initialized() const override { return IsInitialized(); }
 
-  void* AllocateFrom(phi::Allocator* allocator,
-                     phi::DataType dtype,
-                     size_t requested_size = 0,
-                     bool fake_alloc = false) override {
+  void* AllocateFrom(phi::Allocator* allocator UNUSED,
+                     phi::DataType dtype UNUSED,
+                     size_t requested_size UNUSED = 0,
+                     bool fake_alloc UNUSED = false) override {
     PADDLE_THROW(paddle::platform::errors::Unavailable(
         "VariableCompatTensor does not support `AllocateFrom` method."));
   }
diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.h b/paddle/fluid/framework/ir/graph_pattern_detector.h
index 40a7439b06f5adbaffda6c411bcb13605717dc16..1be8e13e2ec748abde7c6f23112a5a9aaf7a10f5 100644
--- a/paddle/fluid/framework/ir/graph_pattern_detector.h
+++ b/paddle/fluid/framework/ir/graph_pattern_detector.h
@@ -1135,10 +1135,10 @@ struct ElementwiseOp : public PatternBase {
 };
 
 struct MatmulElementwiseAdd : public PatternBase {
-  MatmulElementwiseAdd(PDPattern* pattern,
-                       const std::string& name_scope,
-                       const std::string& matmul_type,
-                       bool as_x)
+  MatmulElementwiseAdd(PDPattern* pattern UNUSED,
+                       const std::string& name_scope UNUSED,
+                       const std::string& matmul_type UNUSED,
+                       bool as_x UNUSED)
       : PatternBase(pattern, name_scope, "matmul_elementwise_add") {}
 
   PDNode* operator()(const std::string& matmul_type, bool as_x);
@@ -1155,7 +1155,7 @@ struct MatmulElementwiseAdd : public PatternBase {
 struct ResidualElementwise : public PatternBase {
   ResidualElementwise(PDPattern* pattern,
                       const std::string& name_scope,
-                      bool as_x)
+                      bool as_x UNUSED)
       : PatternBase(pattern, name_scope, "residual_elementwise") {}
 
   PDNode* operator()(PDNode* op_var,
                      PDNode* residual_var,
diff --git a/paddle/fluid/framework/new_executor/workqueue/event_count.h b/paddle/fluid/framework/new_executor/workqueue/event_count.h
index df2169302de2b6994b94a9b91ccaadd1fa20683f..c9fd47b92383f8ae13b9747ce71b699f7ceb5c19 100644
--- a/paddle/fluid/framework/new_executor/workqueue/event_count.h
+++ b/paddle/fluid/framework/new_executor/workqueue/event_count.h
@@ -56,6 +56,7 @@
 #include <cstdlib>
 
 #include "glog/logging.h"
+#include "paddle/phi/core/macros.h"
 
 namespace paddle {
 namespace framework {
@@ -242,7 +243,7 @@ class EventCount {
   Waiter* waiters_{nullptr};
   size_t waiter_num_{0};
 
-  static void CheckState(uint64_t state, bool waiter = false) {
+  static void CheckState(uint64_t state, bool waiter UNUSED = false) {
     static_assert(kEpochBits >= 20, "not enough bits to prevent ABA problem");
     const uint64_t waiters = (state & kWaiterMask) >> kWaiterShift;
     const uint64_t signals = (state & kSignalMask) >> kSignalShift;
diff --git a/paddle/fluid/inference/analysis/argument.h b/paddle/fluid/inference/analysis/argument.h
index b02e2e05efd6b5d83e02285b6104f1b00c002068..014f4e828efc782c4f7cc9c005e32bbccf2d6876 100644
--- a/paddle/fluid/inference/analysis/argument.h
+++ b/paddle/fluid/inference/analysis/argument.h
@@ -116,7 +116,7 @@ struct Argument {
   }                                                          \
   void Set##Field##NotOwned(type__* x) {                     \
     valid_fields_.insert(#field__);                          \
-    field__##_ = unique_ptr_t(x, [](void* x) {});            \
+    field__##_ = unique_ptr_t(x, [](void* x UNUSED) {});     \
   }                                                          \
   DECL_ARGUMENT_FIELD_VALID(field__);                        \
   type__* field__##_ptr() {                                  \
diff --git a/paddle/fluid/operators/elementwise/elementwise_op_function.h b/paddle/fluid/operators/elementwise/elementwise_op_function.h
index 251df9b9d9e4e3151ebfd72c670994be75b75613..b1d5f13bf858b7ee5bb915a3711c69f677f2d876 100644
--- a/paddle/fluid/operators/elementwise/elementwise_op_function.h
+++ b/paddle/fluid/operators/elementwise/elementwise_op_function.h
@@ -641,13 +641,13 @@
 template <typename T>
 inline phi::DenseTensor *_sliceWrapper(const phi::DenseTensor &self,
                                        const phi::CPUContext &ctx,
-                                       py::object obj,
+                                       py::object obj UNUSED,
                                        int dim,
                                        int64_t start,
                                        int64_t slicelength) {
diff --git a/paddle/phi/backends/gpu/cuda/cudnn_helper.h b/paddle/phi/backends/gpu/cuda/cudnn_helper.h
index 468567a9ee9d89632705f9380fae63238ffc6c25..8b6c04090d88e29a382ea04cd5d221ad2d257fba 100644
--- a/paddle/phi/backends/gpu/cuda/cudnn_helper.h
+++ b/paddle/phi/backends/gpu/cuda/cudnn_helper.h
@@ -349,7 +349,7 @@ class ScopedDropoutDescriptor {
   }
 
   inline cudnnDropoutDescriptor_t descriptor(const cudnnHandle_t& handle,
-                                             const phi::Place& place,
+                                             const phi::Place& place UNUSED,
                                              bool initialized,
                                              float dropout_prob_,
                                              phi::DenseTensor* dropout_state_,
diff --git a/paddle/phi/backends/onednn/onednn_helper.h b/paddle/phi/backends/onednn/onednn_helper.h
index 2f9557c86a699bb534b23d6d34c1752b11adeea0..9b5aa167a62763f287925653503f4b706d7fe72a 100644
--- a/paddle/phi/backends/onednn/onednn_helper.h
+++ b/paddle/phi/backends/onednn/onednn_helper.h
@@ -282,8 +282,8 @@ inline std::string ThreadIDasStr(void) {
       std::hash<std::thread::id>()(std::this_thread::get_id()));
 }
 
-inline std::string ExtendKeyWithThreadInfoIfNeeded(const OneDNNContext& dev_ctx,
-                                                   const std::string& key) {
+inline std::string ExtendKeyWithThreadInfoIfNeeded(
+    const OneDNNContext& dev_ctx UNUSED, const std::string& key) {
   return (OneDNNContext::tls().is_tid_used_in_key() == true)
              ? key + "-t:" + ThreadIDasStr()
              : key;
diff --git a/paddle/phi/kernels/funcs/elementwise_grad_base.h b/paddle/phi/kernels/funcs/elementwise_grad_base.h
index 858fc6766afdcbd4519b4fa71a6e30344014caf5..49f593289a9f11ed0ce597b2d21d75926d50438b 100644
--- a/paddle/phi/kernels/funcs/elementwise_grad_base.h
+++ b/paddle/phi/kernels/funcs/elementwise_grad_base.h
@@ -382,12 +382,12 @@ template <typename DeviceContext,
 void ElemwiseGradComputeNoBroadcast(const DeviceContext &dev_ctx,
                                     const DDim &x_dim,
-                                    const DDim &y_dim,
+                                    const DDim &y_dim UNUSED,
                                     const DenseTensor &x,
                                     const DenseTensor &y,
                                     const DenseTensor &out,
                                     const DenseTensor &dout,
-                                    int axis,
+                                    int axis UNUSED,
                                     DenseTensor *dx,
                                     DenseTensor *dy,
                                     DX_OP dx_op,
diff --git a/paddle/phi/kernels/funcs/gather_scatter_functor.cc b/paddle/phi/kernels/funcs/gather_scatter_functor.cc
index db9e8a8247f30a61311396718ec87ce8dac88593..57a8d679f346fd4c7be384bd82fd82e02a8620aa 100644
--- a/paddle/phi/kernels/funcs/gather_scatter_functor.cc
+++ b/paddle/phi/kernels/funcs/gather_scatter_functor.cc
@@ -16,6 +16,8 @@ limitations under the License. */
 
 #include "glog/logging.h"
 
+#include "paddle/phi/core/macros.h"
+
 namespace phi {
 namespace funcs {
 
@@ -53,11 +55,11 @@ struct cpu_gather_scatter_functor {
   template <typename func_t>
   void operator()(phi::DenseTensor self,
                   int dim,
-                  const phi::DenseTensor& index,
+                  const phi::DenseTensor& index UNUSED,
                   const phi::DenseTensor& src,
-                  const std::string& method_name,
+                  const std::string& method_name UNUSED,
                   const func_t& reduce_op,
-                  const phi::DeviceContext& ctx) {
+                  const phi::DeviceContext& ctx UNUSED) {
     if (index.numel() == 0) {
       return;
     }
diff --git a/paddle/phi/kernels/funcs/jit/kernel_base.h b/paddle/phi/kernels/funcs/jit/kernel_base.h
index 5f2e48076d92e16cf8eacc635f0cb7f2dd28eb8d..78bedf184975cd76a9c3970e62b76c8e04335afa 100644
--- a/paddle/phi/kernels/funcs/jit/kernel_base.h
+++ b/paddle/phi/kernels/funcs/jit/kernel_base.h
@@ -361,7 +361,8 @@ template <typename KernelTuple>
 class ReferKernel : public KernelMore<KernelTuple> {
  public:
   // Refer code can always be used
-  bool CanBeUsed(const typename KernelTuple::attr_type& attr) const override {
+  bool CanBeUsed(
+      const typename KernelTuple::attr_type& attr UNUSED) const override {
     return true;
   }
   const char* ImplType() const override { return "Refer"; }
diff --git a/paddle/phi/kernels/funcs/jit/registry.h b/paddle/phi/kernels/funcs/jit/registry.h
index d65a8503b7c31fcbd891ae79e919d8c0608cc1ad..e9b371312548f66b77bf162ab8df95e8d89a033b 100644
--- a/paddle/phi/kernels/funcs/jit/registry.h
+++ b/paddle/phi/kernels/funcs/jit/registry.h
@@ -43,7 +43,7 @@ struct JitKernelRegistrarFunctor;
 
 template <typename Pool, typename PlaceType, size_t I, typename... KernelImpls>
 struct JitKernelRegistrarFunctor<Pool, PlaceType, true, I, KernelImpls...> {
-  void operator()(KernelType kt) const {}
+  void operator()(KernelType kt UNUSED) const {}
 };
 
 template <typename Pool, typename PlaceType, size_t I, typename... KernelImpls>
diff --git a/paddle/phi/kernels/impl/slice_grad_kernel_impl.h b/paddle/phi/kernels/impl/slice_grad_kernel_impl.h
index 152a2c7ff21d5a920d24ff3cbd7e45dd6c90f084..6128f8c7e19026b80b309869f751be5a94e94b51 100644
--- a/paddle/phi/kernels/impl/slice_grad_kernel_impl.h
+++ b/paddle/phi/kernels/impl/slice_grad_kernel_impl.h
@@ -14,13 +14,13 @@
 
 #pragma once
 
+#include "paddle/phi/core/macros.h"
 #include "paddle/phi/core/tensor_utils.h"
"paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/eigen/eigen_function.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/slice_utils.h" #include "paddle/phi/kernels/slice_grad_kernel.h" - namespace phi { template @@ -212,8 +212,8 @@ void SliceGradCompute(const Context& ctx, const DenseTensor& out_grad, const std::vector& axes, const std::vector& starts, - const std::vector& ends, - const std::vector& infer_flags, + const std::vector& ends UNUSED, + const std::vector& infer_flags UNUSED, const std::vector& decrease_axis, DenseTensor* input_grad) { auto* d_out = &out_grad;