Unverified  Commit 834eb2ba authored by Galaxy1458, committed by GitHub

remove some [-Wunused-parameter] (#53185)

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test ,test=develop
Parent 71a513c2
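Every hunk below applies the same pattern: parameters that a default implementation deliberately ignores are annotated with the UNUSED macro from paddle/phi/core/macros.h, which suppresses -Wunused-parameter without changing behavior. Below is a minimal, self-contained sketch of the idea, assuming a GCC/Clang-style definition of the macro (EnvBase is a hypothetical stand-in for the interfaces patched here, not a real Paddle class):

#include <cstdint>

// Assumed definition: on GCC/Clang the attribute marks the parameter as
// intentionally ignorable; elsewhere the macro expands to nothing.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

class EnvBase {  // hypothetical stand-in for the patched interfaces
 public:
  virtual ~EnvBase() = default;
  // Default no-op: without UNUSED, building with -Wall -Wextra (which
  // enables -Wunused-parameter) warns on both parameters; overrides that
  // actually use them are unaffected by the annotation.
  virtual int32_t SetPsServers(uint64_t *host_sign_list UNUSED,
                               int node_num UNUSED) {
    return 0;
  }
};

Since C++17 the standard attribute [[maybe_unused]] expresses the same intent; a project-wide macro keeps a single spelling that also covers toolchains where the attribute is unavailable.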
@@ -22,7 +22,7 @@
 #include "paddle/fluid/distributed/the_one_ps.pb.h"
 #include "paddle/fluid/string/string_helper.h"
+#include "paddle/phi/core/macros.h"
 namespace paddle {
 namespace distributed {
 struct FsDataConverter {
@@ -43,7 +43,7 @@ class FsReadChannel {
   virtual ~FsReadChannel() {}
   FsReadChannel(FsReadChannel&&) = delete;
   FsReadChannel(const FsReadChannel&) = delete;
-  int open(std::shared_ptr<FILE> fp, const FsChannelConfig& config) {
+  int open(std::shared_ptr<FILE> fp, const FsChannelConfig& config UNUSED) {
     _file = fp;
     return 0;
   }
@@ -83,7 +83,7 @@ class FsWriteChannel {
   FsWriteChannel(FsWriteChannel&&) = delete;
   FsWriteChannel(const FsWriteChannel&) = delete;
-  int open(std::shared_ptr<FILE> fp, const FsChannelConfig& config) {
+  int open(std::shared_ptr<FILE> fp, const FsChannelConfig& config UNUSED) {
     _file = fp;
     // the buffer has set in fs.cc
......
@@ -26,6 +26,7 @@
 #include <vector>
 #include "gflags/gflags.h"
+#include "paddle/phi/core/macros.h"
 namespace paddle {
 namespace distributed {
@@ -115,19 +116,23 @@ class PSEnvironment {
   explicit PSEnvironment() {}  // NOLINT
   virtual ~PSEnvironment() {}
-  virtual int32_t SetPsServers(uint64_t *host_sign_list, int node_num) {
+  virtual int32_t SetPsServers(uint64_t *host_sign_list UNUSED,
+                               int node_num UNUSED) {
     return 0;
   }
   virtual int32_t SetPsServers(
-      const std::vector<std::string> *host_endpoint_list, int node_num) {
+      const std::vector<std::string> *host_endpoint_list UNUSED,
+      int node_num UNUSED) {
     return 0;
   }
-  virtual int32_t SetPsClients(uint64_t *host_sign_list, int node_num) {
+  virtual int32_t SetPsClients(uint64_t *host_sign_list UNUSED,
+                               int node_num UNUSED) {
     return 0;
   }
-  virtual int32_t SetPsClients(std::string *host_endpoint_list, int node_num) {
+  virtual int32_t SetPsClients(std::string *host_endpoint_list UNUSED,
+                               int node_num UNUSED) {
     return 0;
   }
......
@@ -137,11 +137,11 @@ class PSClient {
                                       size_t num,
                                       bool is_training) = 0;
-  virtual std::future<int32_t> PullSparseParam(float **select_values,
-                                               size_t table_id,
-                                               const uint64_t *keys,
-                                               size_t num,
-                                               bool is_training) {
+  virtual std::future<int32_t> PullSparseParam(float **select_values UNUSED,
+                                               size_t table_id UNUSED,
+                                               const uint64_t *keys UNUSED,
+                                               size_t num UNUSED,
+                                               bool is_training UNUSED) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int> fut = promise.get_future();
@@ -149,13 +149,14 @@ class PSClient {
     return fut;
   }
-  virtual ::std::future<int32_t> PullSparsePtr(int shard_id,
-                                               char **select_values,
-                                               size_t table_id,
-                                               const uint64_t *keys,
-                                               size_t num,
-                                               uint16_t pass_id,
-                                               const uint16_t &dim_id = 0) {
+  virtual ::std::future<int32_t> PullSparsePtr(
+      int shard_id UNUSED,
+      char **select_values UNUSED,
+      size_t table_id UNUSED,
+      const uint64_t *keys UNUSED,
+      size_t num UNUSED,
+      uint16_t pass_id UNUSED,
+      const uint16_t &dim_id UNUSED = 0) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int> fut = promise.get_future();
@@ -164,9 +165,9 @@ class PSClient {
   }
   virtual std::future<int32_t> PrintTableStat(uint32_t table_id) = 0;
-  virtual std::future<int32_t> SaveCacheTable(uint32_t table_id,
-                                              uint16_t pass_id,
-                                              size_t threshold) {
+  virtual std::future<int32_t> SaveCacheTable(uint32_t table_id UNUSED,
+                                              uint16_t pass_id UNUSED,
+                                              size_t threshold UNUSED) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int> fut = promise.get_future();
@@ -201,9 +202,10 @@ class PSClient {
   virtual void FinalizeWorker() = 0;
   // client to client message sending
-  virtual std::future<int32_t> SendClient2ClientMsg(int msg_type,
-                                                    int to_client_id,
-                                                    const std::string &msg) {
+  virtual std::future<int32_t> SendClient2ClientMsg(int msg_type UNUSED,
+                                                    int to_client_id UNUSED,
+                                                    const std::string &msg
+                                                        UNUSED) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int> fut = promise.get_future();
@@ -272,11 +274,11 @@ class PSClient {
                                            size_t num) = 0;
   // for save cache
-  virtual std::future<int32_t> CacheShuffle(
-      uint32_t table_id,
-      const std::string &path,
-      const std::string &mode,
-      const std::string &cache_threshold) {
+  virtual std::future<int32_t> CacheShuffle(uint32_t table_id UNUSED,
+                                            const std::string &path UNUSED,
+                                            const std::string &mode UNUSED,
+                                            const std::string &cache_threshold
+                                                UNUSED) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int> fut = promise.get_future();
@@ -285,10 +287,10 @@
   }
   virtual std::future<int32_t> CacheShuffleMultiTable(
-      std::vector<int> tables,
-      const std::string &path,
-      const std::string &mode,
-      const std::string &cache_threshold) {
+      std::vector<int> tables UNUSED,
+      const std::string &path UNUSED,
+      const std::string &mode UNUSED,
+      const std::string &cache_threshold UNUSED) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int> fut = promise.get_future();
@@ -296,9 +298,9 @@
     return fut;
   }
-  virtual std::future<int32_t> SaveCache(uint32_t table_id,
-                                         const std::string &path,
-                                         const std::string &mode) {
+  virtual std::future<int32_t> SaveCache(uint32_t table_id UNUSED,
+                                         const std::string &path UNUSED,
+                                         const std::string &mode UNUSED) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int> fut = promise.get_future();
@@ -306,9 +308,9 @@
     return fut;
   }
-  virtual std::future<int32_t> GetCacheThreshold(
-      uint32_t table_id,
-      double &cache_threshold) {  // NOLINT
+  virtual std::future<int32_t> GetCacheThreshold(uint32_t table_id UNUSED,
+                                                 double &cache_threshold
+                                                     UNUSED) {  // NOLINT
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int> fut = promise.get_future();
@@ -333,7 +335,9 @@
   }
   // add
   virtual std::shared_ptr<SparseShardValues> TakePassSparseReferedValues(
-      const size_t &table_id, const uint16_t &pass_id, const uint16_t &dim_id) {
+      const size_t &table_id UNUSED,
+      const uint16_t &pass_id UNUSED,
+      const uint16_t &dim_id UNUSED) {
     VLOG(0) << "Did not implement";
     return nullptr;
   }
......
@@ -91,8 +91,8 @@ class ValueAccessor {
   virtual AccessorInfo GetAccessorInfo() { return _accessor_info; }
-  virtual bool NeedExtendMF(float* value) { return false; }
-  virtual bool HasMF(size_t size) { return false; }
+  virtual bool NeedExtendMF(float* value UNUSED) { return false; }
+  virtual bool HasMF(size_t size UNUSED) { return false; }
   // converter for save
   virtual std::string GetConverter(int param) {
     auto itr = _data_coverter_map.find(param);
@@ -118,11 +118,11 @@ class ValueAccessor {
   // param identifies the save stage, e.g. downpour's xbox vs batch_model
   virtual bool Save(float* value, int param) = 0;
   // update delta_score and unseen_days after save
-  virtual void UpdateStatAfterSave(float* value, int param) {}
+  virtual void UpdateStatAfterSave(float* value UNUSED, int param UNUSED) {}
   // decide whether this value should be saved to ssd
   virtual bool SaveSSD(float* value) = 0;
   // decide whether to filter the slot's feasign during warm start
-  virtual bool FilterSlot(float* value) { return false; }
+  virtual bool FilterSlot(float* value UNUSED) { return false; }
   //
   virtual bool SaveCache(float* value,
@@ -131,7 +131,9 @@ class ValueAccessor {
   // when a key does not exist, generate a random value for it
   virtual int32_t Create(float** value, size_t num) = 0;
-  virtual bool CreateValue(int type, const float* value) { return true; }
+  virtual bool CreateValue(int type UNUSED, const float* value UNUSED) {
+    return true;
+  }
   // copy selected entries from values into select_values
   virtual int32_t Select(float** select_values,
                          const float** values,
@@ -159,22 +161,24 @@ class ValueAccessor {
     return data_convert;
   }
-  virtual int SetWeight(float** values,
-                        const float** update_values,
-                        size_t num) {
+  virtual int SetWeight(float** values UNUSED,
+                        const float** update_values UNUSED,
+                        size_t num UNUSED) {
     return 0;
   }
-  virtual bool SaveMemCache(float* value,
-                            int param,
-                            double global_cache_threshold,
-                            uint16_t pass_id) {
+  virtual bool SaveMemCache(float* value UNUSED,
+                            int param UNUSED,
+                            double global_cache_threshold UNUSED,
+                            uint16_t pass_id UNUSED) {
     return true;
   }
-  virtual void UpdatePassId(float* value, uint16_t pass_id) {}
-  virtual float GetField(float* value, const std::string& name) { return 0.0; }
+  virtual void UpdatePassId(float* value UNUSED, uint16_t pass_id UNUSED) {}
+  virtual float GetField(float* value UNUSED, const std::string& name UNUSED) {
+    return 0.0;
+  }
 #define DEFINE_GET_INDEX(class, field) \
   virtual int get_##field##_index() { return class ::field##_index(); }
......
@@ -16,6 +16,7 @@
 #include <cstddef>
 #include <cstdint>
 #include <vector>
+#include "paddle/phi/core/macros.h"
 namespace paddle {
 namespace distributed {
@@ -26,7 +27,7 @@ class GraphEdgeBlob {
   size_t size() { return id_arr.size(); }
   virtual void add_edge(int64_t id, float weight);
   int64_t get_id(int idx) { return id_arr[idx]; }
-  virtual float get_weight(int idx) { return 1; }
+  virtual float get_weight(int idx UNUSED) { return 1; }
   std::vector<int64_t>& export_id_array() { return id_arr; }
 protected:
......
@@ -38,31 +38,35 @@ class Node {
   int64_t get_py_id() { return (int64_t)id; }
   void set_id(uint64_t id) { this->id = id; }
-  virtual void build_edges(bool is_weighted) {}
-  virtual void build_sampler(std::string sample_type) {}
-  virtual void add_edge(uint64_t id, float weight) {}
+  virtual void build_edges(bool is_weighted UNUSED) {}
+  virtual void build_sampler(std::string sample_type UNUSED) {}
+  virtual void add_edge(uint64_t id UNUSED, float weight UNUSED) {}
   virtual std::vector<int> sample_k(
-      int k, const std::shared_ptr<std::mt19937_64> rng) {
+      int k UNUSED, const std::shared_ptr<std::mt19937_64> rng UNUSED) {
     return std::vector<int>();
   }
-  virtual uint64_t get_neighbor_id(int idx) { return 0; }
-  virtual float get_neighbor_weight(int idx) { return 1.; }
+  virtual uint64_t get_neighbor_id(int idx UNUSED) { return 0; }
+  virtual float get_neighbor_weight(int idx UNUSED) { return 1.; }
   virtual int get_size(bool need_feature);
   virtual void to_buffer(char *buffer, bool need_feature);
   virtual void recover_from_buffer(char *buffer);
-  virtual std::string get_feature(int idx) { return std::string(""); }
-  virtual int get_feature_ids(std::vector<uint64_t> *res) const { return 0; }
-  virtual int get_feature_ids(int slot_idx, std::vector<uint64_t> *res) const {
+  virtual std::string get_feature(int idx UNUSED) { return std::string(""); }
+  virtual int get_feature_ids(std::vector<uint64_t> *res UNUSED) const {
+    return 0;
+  }
+  virtual int get_feature_ids(int slot_idx UNUSED,
+                              std::vector<uint64_t> *res UNUSED) const {
     return 0;
   }
-  virtual int get_feature_ids(int slot_idx,
-                              std::vector<uint64_t> &feature_id,  // NOLINT
-                              std::vector<uint8_t> &slot_id) const {  // NOLINT
+  virtual int get_feature_ids(
+      int slot_idx UNUSED,
+      std::vector<uint64_t> &feature_id UNUSED,  // NOLINT
+      std::vector<uint8_t> &slot_id UNUSED) const {  // NOLINT
     return 0;
   }
-  virtual void set_feature(int idx, const std::string &str) {}
-  virtual void set_feature_size(int size) {}
+  virtual void set_feature(int idx UNUSED, const std::string &str UNUSED) {}
+  virtual void set_feature_size(int size UNUSED) {}
   virtual void shrink_to_fit() {}
   virtual int get_feature_size() { return 0; }
   virtual size_t get_neighbor_size() { return 0; }
......
@@ -20,6 +20,7 @@
 // Phi deps
 #include "paddle/phi/api/include/tensor.h"
 #include "paddle/phi/core/compat/convert_utils.h"
+#include "paddle/phi/core/macros.h"
 namespace egr {
@@ -133,10 +134,10 @@ class VariableCompatTensor
   bool initialized() const override { return IsInitialized(); }
-  void* AllocateFrom(phi::Allocator* allocator,
-                     phi::DataType dtype,
-                     size_t requested_size = 0,
-                     bool fake_alloc = false) override {
+  void* AllocateFrom(phi::Allocator* allocator UNUSED,
+                     phi::DataType dtype UNUSED,
+                     size_t requested_size UNUSED = 0,
+                     bool fake_alloc UNUSED = false) override {
     PADDLE_THROW(paddle::platform::errors::Unavailable(
         "VariableCompatTensor does not support `AllocateFrom` method."));
   }
......
@@ -1135,10 +1135,10 @@ struct ElementwiseOp : public PatternBase {
 };
 struct MatmulElementwiseAdd : public PatternBase {
-  MatmulElementwiseAdd(PDPattern* pattern,
-                       const std::string& name_scope,
-                       const std::string& matmul_type,
-                       bool as_x)
+  MatmulElementwiseAdd(PDPattern* pattern UNUSED,
+                       const std::string& name_scope UNUSED,
+                       const std::string& matmul_type UNUSED,
+                       bool as_x UNUSED)
       : PatternBase(pattern, name_scope, "matmul_elementwise_add") {}
   PDNode* operator()(const std::string& matmul_type, bool as_x);
@@ -1155,7 +1155,7 @@ struct MatmulElementwiseAdd : public PatternBase {
 struct ResidualElementwise : public PatternBase {
   ResidualElementwise(PDPattern* pattern,
                       const std::string& name_scope,
-                      bool as_x)
+                      bool as_x UNUSED)
       : PatternBase(pattern, name_scope, "residual_elementwise") {}
   PDNode* operator()(PDNode* op_var,
                      PDNode* residual_var,
......
@@ -56,6 +56,7 @@
 #include <vector>
 #include "glog/logging.h"
+#include "paddle/phi/core/macros.h"
 namespace paddle {
 namespace framework {
@@ -242,7 +243,7 @@ class EventCount {
   Waiter* waiters_{nullptr};
   size_t waiter_num_{0};
-  static void CheckState(uint64_t state, bool waiter = false) {
+  static void CheckState(uint64_t state, bool waiter UNUSED = false) {
     static_assert(kEpochBits >= 20, "not enough bits to prevent ABA problem");
     const uint64_t waiters = (state & kWaiterMask) >> kWaiterShift;
     const uint64_t signals = (state & kSignalMask) >> kSignalShift;
......
@@ -116,7 +116,7 @@ struct Argument {
   } \
   void Set##Field##NotOwned(type__* x) { \
     valid_fields_.insert(#field__); \
-    field__##_ = unique_ptr_t(x, [](void* x) {}); \
+    field__##_ = unique_ptr_t(x, [](void* x UNUSED) {}); \
   } \
   DECL_ARGUMENT_FIELD_VALID(field__); \
   type__* field__##_ptr() { \
......
@@ -641,13 +641,13 @@ template <typename DeviceContext,
 void FusedElemwiseAndActGradComputeNoBroadcast(
     const framework::ExecutionContext &ctx,
     const framework::DDim &x_dim,
-    const framework::DDim &y_dim,
+    const framework::DDim &y_dim UNUSED,
     const phi::DenseTensor *x,
     const phi::DenseTensor *y,
     const phi::DenseTensor *intermediate_out,
     const phi::DenseTensor *out,
     const phi::DenseTensor *dout,
-    int axis,
+    int axis UNUSED,
    phi::DenseTensor *dx,
     phi::DenseTensor *dy,
     phi::DenseTensor *dintermediate,
......
@@ -844,7 +844,7 @@ void _sliceDapper(const phi::DenseTensor *in,
 template <typename T>
 inline phi::DenseTensor *_sliceWrapper(const phi::DenseTensor &self,
                                        const phi::CPUContext &ctx,
-                                       py::object obj,
+                                       py::object obj UNUSED,
                                        int dim,
                                        int64_t start,
                                        int64_t slicelength) {
......
@@ -349,7 +349,7 @@ class ScopedDropoutDescriptor {
   }
   inline cudnnDropoutDescriptor_t descriptor(const cudnnHandle_t& handle,
-                                             const phi::Place& place,
+                                             const phi::Place& place UNUSED,
                                              bool initialized,
                                              float dropout_prob_,
                                              phi::DenseTensor* dropout_state_,
......
@@ -282,8 +282,8 @@ inline std::string ThreadIDasStr(void) {
       std::hash<std::thread::id>()(std::this_thread::get_id()));
 }
-inline std::string ExtendKeyWithThreadInfoIfNeeded(const OneDNNContext& dev_ctx,
-                                                   const std::string& key) {
+inline std::string ExtendKeyWithThreadInfoIfNeeded(
+    const OneDNNContext& dev_ctx UNUSED, const std::string& key) {
   return (OneDNNContext::tls().is_tid_used_in_key() == true)
              ? key + "-t:" + ThreadIDasStr()
              : key;
......
@@ -382,12 +382,12 @@ template <typename DeviceContext,
           typename Tout = T>
 void ElemwiseGradComputeNoBroadcast(const DeviceContext &dev_ctx,
                                     const DDim &x_dim,
-                                    const DDim &y_dim,
+                                    const DDim &y_dim UNUSED,
                                     const DenseTensor &x,
                                     const DenseTensor &y,
                                     const DenseTensor &out,
                                     const DenseTensor &dout,
-                                    int axis,
+                                    int axis UNUSED,
                                     DenseTensor *dx,
                                     DenseTensor *dy,
                                     DX_OP dx_op,
......
@@ -16,6 +16,8 @@ limitations under the License. */
 #include "glog/logging.h"
+#include "paddle/phi/core/macros.h"
 namespace phi {
 namespace funcs {
@@ -53,11 +55,11 @@ struct cpu_gather_scatter_functor {
   template <typename func_t>
   void operator()(phi::DenseTensor self,
                   int dim,
-                  const phi::DenseTensor& index,
+                  const phi::DenseTensor& index UNUSED,
                   const phi::DenseTensor& src,
-                  const std::string& method_name,
+                  const std::string& method_name UNUSED,
                   const func_t& reduce_op,
-                  const phi::DeviceContext& ctx) {
+                  const phi::DeviceContext& ctx UNUSED) {
     if (index.numel() == 0) {
       return;
     }
......
@@ -361,7 +361,8 @@ template <typename KernelTuple>
 class ReferKernel : public KernelMore<KernelTuple> {
  public:
   // Refer code can always be used
-  bool CanBeUsed(const typename KernelTuple::attr_type& attr) const override {
+  bool CanBeUsed(
+      const typename KernelTuple::attr_type& attr UNUSED) const override {
     return true;
   }
   const char* ImplType() const override { return "Refer"; }
......
@@ -43,7 +43,7 @@ struct JitKernelRegistrarFunctor;
 template <typename Pool, typename PlaceType, size_t I, typename... KernelImpls>
 struct JitKernelRegistrarFunctor<Pool, PlaceType, true, I, KernelImpls...> {
-  void operator()(KernelType kt) const {}
+  void operator()(KernelType kt UNUSED) const {}
 };
 template <typename Pool, typename PlaceType, size_t I, typename... KernelImpls>
......
@@ -14,13 +14,13 @@
 #pragma once
+#include "paddle/phi/core/macros.h"
 #include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/kernels/funcs/eigen/common.h"
 #include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 #include "paddle/phi/kernels/funcs/slice_utils.h"
 #include "paddle/phi/kernels/slice_grad_kernel.h"
 namespace phi {
 template <typename T, typename Context, size_t D>
@@ -212,8 +212,8 @@ void SliceGradCompute(const Context& ctx,
                       const DenseTensor& out_grad,
                       const std::vector<int64_t>& axes,
                       const std::vector<int64_t>& starts,
-                      const std::vector<int64_t>& ends,
-                      const std::vector<int64_t>& infer_flags,
+                      const std::vector<int64_t>& ends UNUSED,
+                      const std::vector<int64_t>& infer_flags UNUSED,
                       const std::vector<int64_t>& decrease_axis,
                       DenseTensor* input_grad) {
   auto* d_out = &out_grad;
......
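For reference, the warning class this commit removes can be reproduced with a short file (assuming GCC or Clang; file and function names here are illustrative):

// repro.cc -- compile with: g++ -c -Wall -Wextra repro.cc
#define UNUSED __attribute__((unused))

int with_warning(int fd, int config) { return fd; }       // warns: unused parameter 'config'
int no_warning(int fd, int config UNUSED) { return fd; }  // compiles silently

Both functions use fd, so only the unannotated config parameter triggers the diagnostic; the UNUSED annotation suppresses it without altering the function's behavior.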