Unverified commit bafc3469 authored by: G Galaxy1458, committed by: GitHub

remove some [-Wunused-parameter] warnings (#53617)

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop
Parent e588f2d9
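For context on the pattern repeated throughout this diff: `-Wunused-parameter` fires whenever a named parameter is never read in the function body, which is common for no-op virtual defaults, overload stubs, and template catch-alls. Rather than deleting the parameter names, the commit keeps them for documentation and tags them with the `UNUSED` macro. Below is a minimal sketch of how such a macro is typically defined and applied; Paddle ships its own definition, so the guards and the `WorkerBase` example here are illustrative assumptions, not the project's actual code.

```cpp
// Illustrative definition of an UNUSED annotation for parameters.
// Paddle provides its own macro; these guards are an assumption.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// Hypothetical example: without the annotation, -Wunused-parameter warns
// about both parameters of this empty virtual default.
class WorkerBase {
 public:
  virtual ~WorkerBase() = default;
  virtual void ResetDataset(int dataset_id UNUSED, bool shuffle UNUSED) {}
};
```

C++17's `[[maybe_unused]]` attribute expresses the same intent in standard form; a project-wide macro keeps the spelling uniform across the compilers the codebase supports.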
@@ -189,7 +189,7 @@ std::shared_ptr<ProcessGroup::Task> ProcessGroupCustom::Collective(
     std::vector<phi::DenseTensor>& outputs,
     Fn fn,
     CommType op_type,
-    bool sync_op,
+    bool sync_op UNUSED,
     bool use_calc_stream) {
   const auto places = GetPlaceList(inputs);
   const auto key = GetKeyFromPlaces(places);
......
@@ -227,8 +227,10 @@ class DownpourPServerBrpcClosure : public PServerClosure {
   PsRequestMessage *request(size_t i) { return &_requests[i]; }
   PsResponseMessage *response(size_t i) { return &_responses[i]; }
   brpc::Controller *cntl(size_t i) { return _cntls[i].get(); }
-  int check_response(size_t request_idx, int cmd_id) { return 1; }
-  int check_save_response(size_t request_idx, int cmd_id) { return 1; }
+  int check_response(size_t request_idx UNUSED, int cmd_id UNUSED) { return 1; }
+  int check_save_response(size_t request_idx UNUSED, int cmd_id UNUSED) {
+    return 1;
+  }
  private:
   std::atomic<int32_t> _waiting_num;
......
@@ -287,10 +287,10 @@ class Communicator {
     return {};
   }
   virtual void SaveFLStrategy(
-      const std::unordered_map<uint32_t, std::string> &fl_strategy) {}
+      const std::unordered_map<uint32_t, std::string> &fl_strategy UNUSED) {}
   virtual void StartCoordinator(
-      const std::string &self_endpoint,
-      const std::vector<std::string> &trainer_endpoints) {}
+      const std::string &self_endpoint UNUSED,
+      const std::vector<std::string> &trainer_endpoints UNUSED) {}
   virtual ~Communicator() {}
   virtual void RpcProfilerControl();
@@ -337,13 +337,13 @@ class Communicator {
   virtual void BarrierTriggerDecrement() {}
-  virtual void BarrierTriggerReset(int init_counter) {}
+  virtual void BarrierTriggerReset(int init_counter UNUSED) {}
   virtual void InitEnvs() = 0;
-  virtual void InitImpl(const RpcCtxMap &send_varname_to_ctx,
-                        const RecvCtxMap &recv_varname_to_ctx,
-                        Scope *recv_scope) {}
+  virtual void InitImpl(const RpcCtxMap &send_varname_to_ctx UNUSED,
+                        const RecvCtxMap &recv_varname_to_ctx UNUSED,
+                        Scope *recv_scope UNUSED) {}
   static Communicator *GetInstance() { return communicator_.get(); }
@@ -682,9 +682,9 @@ class FLCommunicator : public GeoCommunicator {
   virtual void InitBrpcClient(const std::string &dist_desc,
                               const std::vector<std::string> &host_sign_list);
-  void InitImpl(const RpcCtxMap &send_varname_to_ctx,
-                const RecvCtxMap &recv_varname_to_ctx,
-                Scope *recv_scope) {}
+  void InitImpl(const RpcCtxMap &send_varname_to_ctx UNUSED,
+                const RecvCtxMap &recv_varname_to_ctx UNUSED,
+                Scope *recv_scope UNUSED) {}
   void StartCoordinatorClient(
       const std::vector<std::string> &trainer_endpoints);
......
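The Communicator and FLCommunicator hunks above all share one shape: the base class supplies empty virtual defaults (SaveFLStrategy, StartCoordinator, InitImpl), so their parameters are only read by the subclasses that override them. A hedged sketch of that pattern with hypothetical names — not Paddle's real hierarchy, which is considerably larger:

```cpp
// Hypothetical sketch: empty virtual defaults leave parameters unused until a
// subclass overrides them. Names here are illustrative, not Paddle's classes.
#include <string>
#include <vector>

#define UNUSED __attribute__((unused))  // stand-in for Paddle's macro

class CommunicatorSketch {
 public:
  virtual ~CommunicatorSketch() = default;
  // Default: coordination is disabled, so the endpoints are ignored.
  virtual void StartCoordinator(
      const std::string& self_endpoint UNUSED,
      const std::vector<std::string>& trainer_endpoints UNUSED) {}
};

class FLCommunicatorSketch : public CommunicatorSketch {
 public:
  // The override actually reads its parameters, so no annotation is needed.
  void StartCoordinator(
      const std::string& self_endpoint,
      const std::vector<std::string>& trainer_endpoints) override {
    for (const auto& ep : trainer_endpoints) {
      Connect(self_endpoint, ep);
    }
  }

 private:
  void Connect(const std::string& /*from*/, const std::string& /*to*/) {}
};
```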
@@ -151,8 +151,8 @@ class CoordinatorService : public PsService {
       ::google::protobuf::Closure* done);
   int32_t SaveFLClientInfo(const CoordinatorReqMessage& request,
-                           CoordinatorResMessage* response,
-                           brpc::Controller* cntl) {
+                           CoordinatorResMessage* response UNUSED,
+                           brpc::Controller* cntl UNUSED) {
     _coordinator_service_handle->SaveFLClientInfo(request);
     return 0;
   }
......
@@ -26,7 +26,7 @@ class element_visitor {
   explicit element_visitor(int index) : index_(index) {}
   template <typename T>
-  Attribute operator()(const T& attr) const {
+  Attribute operator()(const T& attr UNUSED) const {
     PADDLE_THROW(platform::errors::Unimplemented("Unimplemented operand."));
   }
......
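The element_visitor hunk is the usual visitor catch-all: typed overloads consume the attribute, while the templated fallback ignores its argument and throws. A hypothetical sketch of that shape, where standard exceptions stand in for PADDLE_THROW and `[[maybe_unused]]` stands in for the UNUSED macro:

```cpp
// Hypothetical visitor sketch; Paddle's element_visitor works over its
// Attribute variant and reports errors through PADDLE_THROW instead.
#include <stdexcept>
#include <vector>

class ElementVisitorSketch {
 public:
  explicit ElementVisitorSketch(int index) : index_(index) {}

  // Supported operand: the argument is actually read.
  int operator()(const std::vector<int>& attr) const { return attr.at(index_); }

  // Catch-all for every other type: the argument is deliberately ignored,
  // which is exactly what -Wunused-parameter flags without an annotation.
  template <typename T>
  int operator()([[maybe_unused]] const T& attr) const {
    throw std::runtime_error("Unimplemented operand.");
  }

 private:
  int index_;
};
```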
@@ -70,7 +70,7 @@ class TrainerBase {
   virtual Scope* GetWorkerScope(int thread_id) = 0;
   virtual void InitDumpEnv() = 0;
   virtual void DumpWork(int tid);
-  virtual void ResetDataset(Dataset* dataset_ptr) {}
+  virtual void ResetDataset(Dataset* dataset_ptr UNUSED) {}
  protected:
   virtual std::string GetDumpPath(int tid) = 0;
......
@@ -30,12 +30,12 @@ class RNNMKLDNNHandler : public phi::funcs::OneDNNHandlerT<T, T_alg> {
  public:
   RNNMKLDNNHandler(const paddle::framework::ExecutionContext& ctx,
                    const phi::OneDNNContext& dev_ctx,
-                   const dnnl::engine onednn_engine,
+                   const dnnl::engine onednn_engine UNUSED,
                    platform::Place cpu_place,
-                   const phi::DenseTensor* input,
-                   const phi::DenseTensor* weight_h,
-                   const phi::DenseTensor* h0,
-                   const bool is_reverse,
+                   const phi::DenseTensor* input UNUSED,
+                   const phi::DenseTensor* weight_h UNUSED,
+                   const phi::DenseTensor* h0 UNUSED,
+                   const bool is_reverse UNUSED,
                    const int64_t N,
                    const int64_t Ti,
                    const int64_t IC,
......
@@ -56,7 +56,7 @@ struct ConcatDenseTensor<platform::CustomDeviceContext, T> {
   void operator()(const platform::CustomDeviceContext &context,
                   const std::vector<phi::DenseTensor> &in,
                   phi::DenseTensor *out,
-                  int axis = 0) {
+                  int axis UNUSED = 0) {
     auto *out_data = out->data<T>();
     auto *device = phi::DeviceManager::GetDeviceWithPlace(context.GetPlace());
     size_t offset = 0;
@@ -80,7 +80,7 @@ struct SplitDenseTensor<platform::CustomDeviceContext, T> {
   void operator()(const platform::CustomDeviceContext &context,
                   const phi::DenseTensor &in,
                   std::vector<phi::DenseTensor *> *out,
-                  int axis = 0) {
+                  int axis UNUSED = 0) {
     auto *in_data = in.data<T>();
     auto *device = phi::DeviceManager::GetDeviceWithPlace(context.GetPlace());
     size_t offset = 0;
......
@@ -191,7 +191,8 @@ inline void AppendKey(std::string* key, const std::vector<T>& dims) {
 }
 template <typename... ArgTypes>
-inline std::string CreateKey(const OneDNNContext& dev_ctx, ArgTypes&&... args) {
+inline std::string CreateKey(const OneDNNContext& dev_ctx UNUSED,
+                             ArgTypes&&... args) {
   std::string key;
   key.reserve(64);
   using expand_type = int[];
......
@@ -676,7 +676,7 @@ class OneDNNHandlerNoCachingT {
       const dnnl::memory::desc& user_md,
       const dnnl::memory::desc& target_md,
       void* ptr,
-      bool is_persistent = false,
+      bool is_persistent UNUSED = false,
       std::function<std::shared_ptr<F>(const F*)> custom_reorder_func = {}) {
     std::shared_ptr<dnnl::memory> target_memory_p;
     if (custom_reorder_func) {
......
@@ -24,7 +24,7 @@
 DECLARE_int32(search_cache_max_number);
-inline void HashCombine(std::size_t* seed) {}
+inline void HashCombine(std::size_t* seed UNUSED) {}
 // combine hash value
 // https://stackoverflow.com/questions/2590677/how-do-i-combine-hash-values-in-c0x
......
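The empty HashCombine(std::size_t* seed) overload looks odd on its own; judging by the surrounding comment it is the terminator of a variadic hash-combining recursion, so the seed is never touched at that point. A hedged sketch of that common pattern — the mixing constant follows boost::hash_combine, and the real Paddle helper may differ in detail:

```cpp
// Sketch of a variadic hash combiner; the zero-value overload ends the
// recursion and never reads `seed`, which is why the diff tags it UNUSED.
#include <cstddef>
#include <functional>

inline void HashCombineSketch(std::size_t* seed) {}  // recursion terminator

template <typename T, typename... Rest>
inline void HashCombineSketch(std::size_t* seed, const T& v, Rest... rest) {
  std::hash<T> hasher;
  // Fold one value into the running seed, boost::hash_combine style.
  *seed ^= hasher(v) + 0x9e3779b9 + (*seed << 6) + (*seed >> 2);
  HashCombineSketch(seed, rest...);
}
```

A call such as `HashCombineSketch(&seed, a, b, c)` folds each argument into the seed and bottoms out in the empty overload.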
@@ -127,7 +127,7 @@ template <typename Context,
           class BinaryFunctor,
           typename T>
 struct BinaryOperation {
-  void operator()(const Context& dev_ctx,
+  void operator()(const Context& dev_ctx UNUSED,
                   const DenseTensor& lhs,
                   const DenseTensor& rhs,
                   DenseTensor* output) {
......
@@ -38,7 +38,7 @@ struct LogsumexpGradFunctor {
       DX* dx,
       DY* dy,
       const Dim& dim,
-      int size) {
+      int size UNUSED) {
     using MT = typename phi::dtype::MPTypeTrait<T>::Type;
     auto x_mt = (*x).template cast<MT>();
     auto y_mt = (*y).template cast<MT>();
......
@@ -103,7 +103,7 @@ void MatMulFunctionImplWithBlas(
     bool trans_x,
     bool trans_y,
     bool flag = false,
-    phi::funcs::MatmulPlanner* matmul_planner = nullptr) {
+    phi::funcs::MatmulPlanner* matmul_planner UNUSED = nullptr) {
   const int x_ndim = x_dims.size();
   const int y_ndim = y_dims.size();
......
@@ -39,8 +39,8 @@ class GpuAndCpuSearchSortedCompute {
     return std::isnan(x);
 #endif
   }
-  static HOSTDEVICE bool IsNan(int x) { return false; }
-  static HOSTDEVICE bool IsNan(int64_t x) { return false; }
+  static HOSTDEVICE bool IsNan(int x UNUSED) { return false; }
+  static HOSTDEVICE bool IsNan(int64_t x UNUSED) { return false; }
   static HOSTDEVICE bool IsInf(float x) {
 #ifdef __NVCC__
@@ -56,8 +56,8 @@ class GpuAndCpuSearchSortedCompute {
     return std::isinf(x);
 #endif
   }
-  static HOSTDEVICE bool IsInf(int x) { return false; }
-  static HOSTDEVICE bool IsInf(int64_t x) { return false; }
+  static HOSTDEVICE bool IsInf(int x UNUSED) { return false; }
+  static HOSTDEVICE bool IsInf(int64_t x UNUSED) { return false; }
   HOSTDEVICE GpuAndCpuSearchSortedCompute(const T1* sequence_data,
                                           const T2* value_data,
......
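For the IsNan/IsInf integer overloads, the result is a constant because integral types cannot represent NaN or Inf; the parameter only exists so templated callers can write IsNan(x) uniformly for every element type. A small hypothetical sketch of that idea:

```cpp
// Hypothetical sketch: uniform NaN checks across element types. For integral
// types the answer is always false, so the argument goes unused.
#include <cmath>
#include <cstdint>

struct NanCheckSketch {
  static bool IsNan(float x) { return std::isnan(x); }
  static bool IsNan(double x) { return std::isnan(x); }
  static bool IsNan([[maybe_unused]] int x) { return false; }      // no integer NaN
  static bool IsNan([[maybe_unused]] int64_t x) { return false; }  // no integer NaN
};

// The same call compiles for float, double, int and int64_t element types.
template <typename T>
bool AnyNan(const T* data, int n) {
  for (int i = 0; i < n; ++i) {
    if (NanCheckSketch::IsNan(data[i])) return true;
  }
  return false;
}
```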
@@ -25,7 +25,7 @@ namespace phi {
 template <typename T, typename Context>
 void SplitKernel(const Context& dev_ctx,
                  const DenseTensor& x,
-                 const IntArray& sections,
+                 const IntArray& sections UNUSED,
                  const Scalar& axis_scalar,
                  std::vector<DenseTensor*> outs) {
   std::vector<const DenseTensor*> shape_refer;
......
@@ -54,7 +54,7 @@ template <typename T, typename Context>
 void FullLikeCsrKernel(const Context& dev_ctx,
                        const SparseCsrTensor& x,
                        const Scalar& val,
-                       DataType dtype,
+                       DataType dtype UNUSED,
                        SparseCsrTensor* out) {
   phi::Copy<Context>(dev_ctx,
                      x.non_zero_crows(),
......
@@ -158,7 +158,7 @@ void ValuesCsrKernel(const Context& dev_ctx UNUSED,
 }
 template <typename T, typename Context>
-void IndicesCooKernel(const Context& dev_ctx,
+void IndicesCooKernel(const Context& dev_ctx UNUSED,
                       const SparseCooTensor& x,
                       DenseTensor* out) {
   *out = x.indices();
......