Unverified commit bafc3469, authored by Galaxy1458, committed by GitHub

remove some [-Wunused-parameter] warning (#53617)

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop
Parent e588f2d9
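
The change repeated across every hunk below is mechanical: parameters that a given overload or stub never reads are tagged with Paddle's UNUSED macro, so the compiler stops emitting -Wunused-parameter for them while the signature stays intact. As a hedged sketch only, not the verbatim Paddle header, such a macro is conventionally built on the GCC/Clang unused attribute:

#include <cstddef>

// Minimal sketch of an UNUSED-style macro; Paddle's actual definition lives in
// its own macros header and may differ in detail.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// Placed after the parameter name, it suppresses -Wunused-parameter for that
// parameter only, matching the pattern used throughout this commit:
int check_response(std::size_t request_idx UNUSED, int cmd_id UNUSED) {
  return 1;  // stub that intentionally ignores its arguments
}

Because the macro expands to nothing on compilers without the attribute, the annotation is a no-op wherever the warning does not exist.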
@@ -189,7 +189,7 @@ std::shared_ptr<ProcessGroup::Task> ProcessGroupCustom::Collective(
     std::vector<phi::DenseTensor>& outputs,
     Fn fn,
     CommType op_type,
-    bool sync_op,
+    bool sync_op UNUSED,
     bool use_calc_stream) {
   const auto places = GetPlaceList(inputs);
   const auto key = GetKeyFromPlaces(places);
...
@@ -227,8 +227,10 @@ class DownpourPServerBrpcClosure : public PServerClosure {
   PsRequestMessage *request(size_t i) { return &_requests[i]; }
   PsResponseMessage *response(size_t i) { return &_responses[i]; }
   brpc::Controller *cntl(size_t i) { return _cntls[i].get(); }
-  int check_response(size_t request_idx, int cmd_id) { return 1; }
-  int check_save_response(size_t request_idx, int cmd_id) { return 1; }
+  int check_response(size_t request_idx UNUSED, int cmd_id UNUSED) { return 1; }
+  int check_save_response(size_t request_idx UNUSED, int cmd_id UNUSED) {
+    return 1;
+  }
  private:
   std::atomic<int32_t> _waiting_num;
...
@@ -287,10 +287,10 @@ class Communicator {
     return {};
   }
   virtual void SaveFLStrategy(
-      const std::unordered_map<uint32_t, std::string> &fl_strategy) {}
+      const std::unordered_map<uint32_t, std::string> &fl_strategy UNUSED) {}
   virtual void StartCoordinator(
-      const std::string &self_endpoint,
-      const std::vector<std::string> &trainer_endpoints) {}
+      const std::string &self_endpoint UNUSED,
+      const std::vector<std::string> &trainer_endpoints UNUSED) {}
   virtual ~Communicator() {}
   virtual void RpcProfilerControl();
@@ -337,13 +337,13 @@ class Communicator {
   virtual void BarrierTriggerDecrement() {}
-  virtual void BarrierTriggerReset(int init_counter) {}
+  virtual void BarrierTriggerReset(int init_counter UNUSED) {}
   virtual void InitEnvs() = 0;
-  virtual void InitImpl(const RpcCtxMap &send_varname_to_ctx,
-                        const RecvCtxMap &recv_varname_to_ctx,
-                        Scope *recv_scope) {}
+  virtual void InitImpl(const RpcCtxMap &send_varname_to_ctx UNUSED,
+                        const RecvCtxMap &recv_varname_to_ctx UNUSED,
+                        Scope *recv_scope UNUSED) {}
   static Communicator *GetInstance() { return communicator_.get(); }
@@ -682,9 +682,9 @@ class FLCommunicator : public GeoCommunicator {
   virtual void InitBrpcClient(const std::string &dist_desc,
                               const std::vector<std::string> &host_sign_list);
-  void InitImpl(const RpcCtxMap &send_varname_to_ctx,
-                const RecvCtxMap &recv_varname_to_ctx,
-                Scope *recv_scope) {}
+  void InitImpl(const RpcCtxMap &send_varname_to_ctx UNUSED,
+                const RecvCtxMap &recv_varname_to_ctx UNUSED,
+                Scope *recv_scope UNUSED) {}
   void StartCoordinatorClient(
       const std::vector<std::string> &trainer_endpoints);
...
@@ -151,8 +151,8 @@ class CoordinatorService : public PsService {
                        ::google::protobuf::Closure* done);
   int32_t SaveFLClientInfo(const CoordinatorReqMessage& request,
-                           CoordinatorResMessage* response,
-                           brpc::Controller* cntl) {
+                           CoordinatorResMessage* response UNUSED,
+                           brpc::Controller* cntl UNUSED) {
     _coordinator_service_handle->SaveFLClientInfo(request);
     return 0;
   }
...
@@ -26,7 +26,7 @@ class element_visitor {
   explicit element_visitor(int index) : index_(index) {}
   template <typename T>
-  Attribute operator()(const T& attr) const {
+  Attribute operator()(const T& attr UNUSED) const {
     PADDLE_THROW(platform::errors::Unimplemented("Unimplemented operand."));
   }
...
@@ -70,7 +70,7 @@ class TrainerBase {
   virtual Scope* GetWorkerScope(int thread_id) = 0;
   virtual void InitDumpEnv() = 0;
   virtual void DumpWork(int tid);
-  virtual void ResetDataset(Dataset* dataset_ptr) {}
+  virtual void ResetDataset(Dataset* dataset_ptr UNUSED) {}
  protected:
   virtual std::string GetDumpPath(int tid) = 0;
...
@@ -30,12 +30,12 @@ class RNNMKLDNNHandler : public phi::funcs::OneDNNHandlerT<T, T_alg> {
  public:
   RNNMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
                    const phi::OneDNNContext& dev_ctx,
-                   const dnnl::engine onednn_engine,
+                   const dnnl::engine onednn_engine UNUSED,
                    platform::Place cpu_place,
-                   const phi::DenseTensor* input,
-                   const phi::DenseTensor* weight_h,
-                   const phi::DenseTensor* h0,
-                   const bool is_reverse,
+                   const phi::DenseTensor* input UNUSED,
+                   const phi::DenseTensor* weight_h UNUSED,
+                   const phi::DenseTensor* h0 UNUSED,
+                   const bool is_reverse UNUSED,
                    const int64_t N,
                    const int64_t Ti,
                    const int64_t IC,
...
@@ -56,7 +56,7 @@ struct ConcatDenseTensor<platform::CustomDeviceContext, T> {
   void operator()(const platform::CustomDeviceContext &context,
                   const std::vector<phi::DenseTensor> &in,
                   phi::DenseTensor *out,
-                  int axis = 0) {
+                  int axis UNUSED = 0) {
     auto *out_data = out->data<T>();
     auto *device = phi::DeviceManager::GetDeviceWithPlace(context.GetPlace());
     size_t offset = 0;
@@ -80,7 +80,7 @@ struct SplitDenseTensor<platform::CustomDeviceContext, T> {
   void operator()(const platform::CustomDeviceContext &context,
                   const phi::DenseTensor &in,
                   std::vector<phi::DenseTensor *> *out,
-                  int axis = 0) {
+                  int axis UNUSED = 0) {
     auto *in_data = in.data<T>();
     auto *device = phi::DeviceManager::GetDeviceWithPlace(context.GetPlace());
     size_t offset = 0;
...
@@ -191,7 +191,8 @@ inline void AppendKey(std::string* key, const std::vector<T>& dims) {
 }
 template <typename... ArgTypes>
-inline std::string CreateKey(const OneDNNContext& dev_ctx, ArgTypes&&... args) {
+inline std::string CreateKey(const OneDNNContext& dev_ctx UNUSED,
+                             ArgTypes&&... args) {
   std::string key;
   key.reserve(64);
   using expand_type = int[];
...
@@ -676,7 +676,7 @@ class OneDNNHandlerNoCachingT {
       const dnnl::memory::desc& user_md,
       const dnnl::memory::desc& target_md,
       void* ptr,
-      bool is_persistent = false,
+      bool is_persistent UNUSED = false,
       std::function<std::shared_ptr<F>(const F*)> custom_reorder_func = {}) {
     std::shared_ptr<dnnl::memory> target_memory_p;
     if (custom_reorder_func) {
...
@@ -24,7 +24,7 @@
 DECLARE_int32(search_cache_max_number);
-inline void HashCombine(std::size_t* seed) {}
+inline void HashCombine(std::size_t* seed UNUSED) {}
 // combine hash value
 // https://stackoverflow.com/questions/2590677/how-do-i-combine-hash-values-in-c0x
...
@@ -127,7 +127,7 @@ template <typename Context,
           class BinaryFunctor,
           typename T>
 struct BinaryOperation {
-  void operator()(const Context& dev_ctx,
+  void operator()(const Context& dev_ctx UNUSED,
                   const DenseTensor& lhs,
                   const DenseTensor& rhs,
                   DenseTensor* output) {
...
@@ -38,7 +38,7 @@ struct LogsumexpGradFunctor {
                   DX* dx,
                   DY* dy,
                   const Dim& dim,
-                  int size) {
+                  int size UNUSED) {
     using MT = typename phi::dtype::MPTypeTrait<T>::Type;
     auto x_mt = (*x).template cast<MT>();
     auto y_mt = (*y).template cast<MT>();
...
@@ -103,7 +103,7 @@ void MatMulFunctionImplWithBlas(
     bool trans_x,
     bool trans_y,
     bool flag = false,
-    phi::funcs::MatmulPlanner* matmul_planner = nullptr) {
+    phi::funcs::MatmulPlanner* matmul_planner UNUSED = nullptr) {
   const int x_ndim = x_dims.size();
   const int y_ndim = y_dims.size();
...
@@ -39,8 +39,8 @@ class GpuAndCpuSearchSortedCompute {
     return std::isnan(x);
 #endif
   }
-  static HOSTDEVICE bool IsNan(int x) { return false; }
-  static HOSTDEVICE bool IsNan(int64_t x) { return false; }
+  static HOSTDEVICE bool IsNan(int x UNUSED) { return false; }
+  static HOSTDEVICE bool IsNan(int64_t x UNUSED) { return false; }
   static HOSTDEVICE bool IsInf(float x) {
 #ifdef __NVCC__
@@ -56,8 +56,8 @@ class GpuAndCpuSearchSortedCompute {
     return std::isinf(x);
 #endif
   }
-  static HOSTDEVICE bool IsInf(int x) { return false; }
-  static HOSTDEVICE bool IsInf(int64_t x) { return false; }
+  static HOSTDEVICE bool IsInf(int x UNUSED) { return false; }
+  static HOSTDEVICE bool IsInf(int64_t x UNUSED) { return false; }
   HOSTDEVICE GpuAndCpuSearchSortedCompute(const T1* sequence_data,
                                           const T2* value_data,
...
@@ -25,7 +25,7 @@ namespace phi {
 template <typename T, typename Context>
 void SplitKernel(const Context& dev_ctx,
                  const DenseTensor& x,
-                 const IntArray& sections,
+                 const IntArray& sections UNUSED,
                  const Scalar& axis_scalar,
                  std::vector<DenseTensor*> outs) {
   std::vector<const DenseTensor*> shape_refer;
...
@@ -54,7 +54,7 @@ template <typename T, typename Context>
 void FullLikeCsrKernel(const Context& dev_ctx,
                        const SparseCsrTensor& x,
                        const Scalar& val,
-                       DataType dtype,
+                       DataType dtype UNUSED,
                        SparseCsrTensor* out) {
   phi::Copy<Context>(dev_ctx,
                      x.non_zero_crows(),
...
@@ -158,7 +158,7 @@ void ValuesCsrKernel(const Context& dev_ctx UNUSED,
 }
 template <typename T, typename Context>
-void IndicesCooKernel(const Context& dev_ctx,
+void IndicesCooKernel(const Context& dev_ctx UNUSED,
                       const SparseCooTensor& x,
                       DenseTensor* out) {
   *out = x.indices();
...
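
For comparison only, and not part of this commit, the same warnings can also be silenced with standard C++17 facilities. A minimal, self-contained sketch with a hypothetical function, not Paddle code:

#include <cstddef>

// [[maybe_unused]] is the standard spelling of the same intent; leaving the
// parameter unnamed (name kept in a comment) works in any C++ standard.
int check_response([[maybe_unused]] std::size_t request_idx, int /*cmd_id*/) {
  return 1;
}

int main() { return check_response(0, 0) == 1 ? 0 : 1; }

The commit uses the UNUSED macro instead, which keeps the parameter names visible in the signatures.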