Unverified commit 834eb2ba, authored by Galaxy1458 and committed by GitHub

remove some [-Wunused-parameter] (#53185)

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test,test=develop

* test ,test=develop
Parent 71a513c2
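Every change in this commit follows the same recipe: a parameter that a default or stub implementation never reads is annotated with the `UNUSED` macro, and `paddle/phi/core/macros.h` (which provides the macro) is included wherever the annotation first appears. As a rough sketch of what such a marker usually expands to (the authoritative definition lives in macros.h; this is an illustration, not the verbatim source):

    // Illustrative definition of an unused-parameter marker.
    // GCC/Clang honor __attribute__((unused)); other compilers get a no-op.
    #if defined(__GNUC__) || defined(__clang__)
    #define UNUSED __attribute__((unused))
    #else
    #define UNUSED
    #endif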
@@ -22,7 +22,7 @@
 #include "paddle/fluid/distributed/the_one_ps.pb.h"
 #include "paddle/fluid/string/string_helper.h"
+#include "paddle/phi/core/macros.h"
 namespace paddle {
 namespace distributed {
 struct FsDataConverter {
@@ -43,7 +43,7 @@ class FsReadChannel {
   virtual ~FsReadChannel() {}
   FsReadChannel(FsReadChannel&&) = delete;
   FsReadChannel(const FsReadChannel&) = delete;
-  int open(std::shared_ptr<FILE> fp, const FsChannelConfig& config) {
+  int open(std::shared_ptr<FILE> fp, const FsChannelConfig& config UNUSED) {
     _file = fp;
     return 0;
   }
...@@ -83,7 +83,7 @@ class FsWriteChannel { ...@@ -83,7 +83,7 @@ class FsWriteChannel {
FsWriteChannel(FsWriteChannel&&) = delete; FsWriteChannel(FsWriteChannel&&) = delete;
FsWriteChannel(const FsWriteChannel&) = delete; FsWriteChannel(const FsWriteChannel&) = delete;
int open(std::shared_ptr<FILE> fp, const FsChannelConfig& config) { int open(std::shared_ptr<FILE> fp, const FsChannelConfig& config UNUSED) {
_file = fp; _file = fp;
// the buffer has set in fs.cc // the buffer has set in fs.cc
......
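For context, a minimal standalone reproduction (hypothetical, not Paddle code) of the warning this commit silences, and of the annotation that fixes it:

    // demo.cc -- compile with: g++ -Wall -Wextra -c demo.cc
    int open_plain(int fd, int config) {  // warning: unused parameter 'config'
      return fd;
    }

    int open_annotated(int fd, int config __attribute__((unused))) {
      return fd;  // no warning: 'config' is explicitly marked unused
    }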
@@ -26,6 +26,7 @@
 #include <vector>
 #include "gflags/gflags.h"
+#include "paddle/phi/core/macros.h"
 namespace paddle {
 namespace distributed {
@@ -115,19 +116,23 @@ class PSEnvironment {
   explicit PSEnvironment() {}  // NOLINT
   virtual ~PSEnvironment() {}
-  virtual int32_t SetPsServers(uint64_t *host_sign_list, int node_num) {
+  virtual int32_t SetPsServers(uint64_t *host_sign_list UNUSED,
+                               int node_num UNUSED) {
     return 0;
   }
   virtual int32_t SetPsServers(
-      const std::vector<std::string> *host_endpoint_list, int node_num) {
+      const std::vector<std::string> *host_endpoint_list UNUSED,
+      int node_num UNUSED) {
     return 0;
   }
-  virtual int32_t SetPsClients(uint64_t *host_sign_list, int node_num) {
+  virtual int32_t SetPsClients(uint64_t *host_sign_list UNUSED,
+                               int node_num UNUSED) {
     return 0;
   }
-  virtual int32_t SetPsClients(std::string *host_endpoint_list, int node_num) {
+  virtual int32_t SetPsClients(std::string *host_endpoint_list UNUSED,
+                               int node_num UNUSED) {
     return 0;
   }
......
@@ -137,11 +137,11 @@ class PSClient {
                                          size_t num,
                                          bool is_training) = 0;
-  virtual std::future<int32_t> PullSparseParam(float **select_values,
-                                               size_t table_id,
-                                               const uint64_t *keys,
-                                               size_t num,
-                                               bool is_training) {
+  virtual std::future<int32_t> PullSparseParam(float **select_values UNUSED,
+                                               size_t table_id UNUSED,
+                                               const uint64_t *keys UNUSED,
+                                               size_t num UNUSED,
+                                               bool is_training UNUSED) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int> fut = promise.get_future();
@@ -149,13 +149,14 @@ class PSClient {
     return fut;
   }
-  virtual ::std::future<int32_t> PullSparsePtr(int shard_id,
-                                               char **select_values,
-                                               size_t table_id,
-                                               const uint64_t *keys,
-                                               size_t num,
-                                               uint16_t pass_id,
-                                               const uint16_t &dim_id = 0) {
+  virtual ::std::future<int32_t> PullSparsePtr(
+      int shard_id UNUSED,
+      char **select_values UNUSED,
+      size_t table_id UNUSED,
+      const uint64_t *keys UNUSED,
+      size_t num UNUSED,
+      uint16_t pass_id UNUSED,
+      const uint16_t &dim_id UNUSED = 0) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int> fut = promise.get_future();
@@ -164,9 +165,9 @@ class PSClient {
   }
   virtual std::future<int32_t> PrintTableStat(uint32_t table_id) = 0;
-  virtual std::future<int32_t> SaveCacheTable(uint32_t table_id,
-                                              uint16_t pass_id,
-                                              size_t threshold) {
+  virtual std::future<int32_t> SaveCacheTable(uint32_t table_id UNUSED,
+                                              uint16_t pass_id UNUSED,
+                                              size_t threshold UNUSED) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int> fut = promise.get_future();
@@ -201,9 +202,10 @@ class PSClient {
   virtual void FinalizeWorker() = 0;
   // client to client, message sending
-  virtual std::future<int32_t> SendClient2ClientMsg(int msg_type,
-                                                    int to_client_id,
-                                                    const std::string &msg) {
+  virtual std::future<int32_t> SendClient2ClientMsg(int msg_type UNUSED,
+                                                    int to_client_id UNUSED,
+                                                    const std::string &msg
+                                                        UNUSED) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int> fut = promise.get_future();
@@ -272,11 +274,11 @@ class PSClient {
                                       size_t num) = 0;
   // for save cache
-  virtual std::future<int32_t> CacheShuffle(
-      uint32_t table_id,
-      const std::string &path,
-      const std::string &mode,
-      const std::string &cache_threshold) {
+  virtual std::future<int32_t> CacheShuffle(uint32_t table_id UNUSED,
+                                            const std::string &path UNUSED,
+                                            const std::string &mode UNUSED,
+                                            const std::string &cache_threshold
+                                                UNUSED) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int> fut = promise.get_future();
@@ -285,10 +287,10 @@ class PSClient {
   }
   virtual std::future<int32_t> CacheShuffleMultiTable(
-      std::vector<int> tables,
-      const std::string &path,
-      const std::string &mode,
-      const std::string &cache_threshold) {
+      std::vector<int> tables UNUSED,
+      const std::string &path UNUSED,
+      const std::string &mode UNUSED,
+      const std::string &cache_threshold UNUSED) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int> fut = promise.get_future();
@@ -296,9 +298,9 @@ class PSClient {
     return fut;
   }
-  virtual std::future<int32_t> SaveCache(uint32_t table_id,
-                                         const std::string &path,
-                                         const std::string &mode) {
+  virtual std::future<int32_t> SaveCache(uint32_t table_id UNUSED,
+                                         const std::string &path UNUSED,
+                                         const std::string &mode UNUSED) {
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int> fut = promise.get_future();
@@ -306,9 +308,9 @@ class PSClient {
     return fut;
   }
-  virtual std::future<int32_t> GetCacheThreshold(
-      uint32_t table_id,
-      double &cache_threshold) {  // NOLINT
+  virtual std::future<int32_t> GetCacheThreshold(uint32_t table_id UNUSED,
+                                                 double &cache_threshold
+                                                     UNUSED) {  // NOLINT
     VLOG(0) << "Did not implement";
     std::promise<int32_t> promise;
     std::future<int> fut = promise.get_future();
@@ -333,7 +335,9 @@ class PSClient {
   }
   // add
   virtual std::shared_ptr<SparseShardValues> TakePassSparseReferedValues(
-      const size_t &table_id, const uint16_t &pass_id, const uint16_t &dim_id) {
+      const size_t &table_id UNUSED,
+      const uint16_t &pass_id UNUSED,
+      const uint16_t &dim_id UNUSED) {
     VLOG(0) << "Did not implement";
     return nullptr;
   }
......
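All of the PSClient defaults touched above share one stub pattern: log "Did not implement", fulfill a promise with -1, and return the future so a caller that waits on it never blocks. A self-contained sketch of that pattern (standalone illustration, not the actual class):

    #include <cstdint>
    #include <future>
    #include <iostream>

    // Default body for an optional RPC-style API: report failure immediately.
    std::future<int32_t> NotImplementedStub() {
      std::promise<int32_t> promise;
      std::future<int32_t> fut = promise.get_future();
      promise.set_value(-1);  // result is ready before the future is returned
      return fut;
    }

    int main() {
      auto fut = NotImplementedStub();
      std::cout << fut.get() << std::endl;  // prints -1 without blocking
      return 0;
    }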
@@ -91,8 +91,8 @@ class ValueAccessor {
   virtual AccessorInfo GetAccessorInfo() { return _accessor_info; }
-  virtual bool NeedExtendMF(float* value) { return false; }
-  virtual bool HasMF(size_t size) { return false; }
+  virtual bool NeedExtendMF(float* value UNUSED) { return false; }
+  virtual bool HasMF(size_t size UNUSED) { return false; }
   // converter for save
   virtual std::string GetConverter(int param) {
     auto itr = _data_coverter_map.find(param);
@@ -118,11 +118,11 @@ class ValueAccessor {
   // param identifies the save stage, e.g. downpour's xbox vs. batch_model
   virtual bool Save(float* value, int param) = 0;
   // update delta_score and unseen_days after save
-  virtual void UpdateStatAfterSave(float* value, int param) {}
+  virtual void UpdateStatAfterSave(float* value UNUSED, int param UNUSED) {}
   // whether this value should be saved to ssd
   virtual bool SaveSSD(float* value) = 0;
   // whether to filter the slot's feasign during warm start
-  virtual bool FilterSlot(float* value) { return false; }
+  virtual bool FilterSlot(float* value UNUSED) { return false; }
   //
   virtual bool SaveCache(float* value,
@@ -131,7 +131,9 @@ class ValueAccessor {
   // when a key does not exist, generate random values for it
   virtual int32_t Create(float** value, size_t num) = 0;
-  virtual bool CreateValue(int type, const float* value) { return true; }
+  virtual bool CreateValue(int type UNUSED, const float* value UNUSED) {
+    return true;
+  }
   // pick entries from values into select_values
   virtual int32_t Select(float** select_values,
                          const float** values,
@@ -159,22 +161,24 @@ class ValueAccessor {
     return data_convert;
   }
-  virtual int SetWeight(float** values,
-                        const float** update_values,
-                        size_t num) {
+  virtual int SetWeight(float** values UNUSED,
+                        const float** update_values UNUSED,
+                        size_t num UNUSED) {
     return 0;
   }
-  virtual bool SaveMemCache(float* value,
-                            int param,
-                            double global_cache_threshold,
-                            uint16_t pass_id) {
+  virtual bool SaveMemCache(float* value UNUSED,
+                            int param UNUSED,
+                            double global_cache_threshold UNUSED,
+                            uint16_t pass_id UNUSED) {
     return true;
   }
-  virtual void UpdatePassId(float* value, uint16_t pass_id) {}
-  virtual float GetField(float* value, const std::string& name) { return 0.0; }
+  virtual void UpdatePassId(float* value UNUSED, uint16_t pass_id UNUSED) {}
+  virtual float GetField(float* value UNUSED, const std::string& name UNUSED) {
+    return 0.0;
+  }
 #define DEFINE_GET_INDEX(class, field) \
   virtual int get_##field##_index() { return class ::field##_index(); }
......
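Worth noting: the same warning can also be silenced with C++17's `[[maybe_unused]]` attribute or by leaving the parameter unnamed; a project-wide macro presumably keeps a single spelling across the supported compilers. An illustration of the two standard alternatives (hypothetical, not part of the commit):

    #include <cstddef>

    struct AccessorSketch {
      // C++17 attribute form:
      virtual bool HasMF([[maybe_unused]] std::size_t size) { return false; }
      // Unnamed-parameter form; the name survives only as a comment:
      virtual bool NeedExtendMF(float* /*value*/) { return false; }
      virtual ~AccessorSketch() = default;
    };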
@@ -16,6 +16,7 @@
 #include <cstddef>
 #include <cstdint>
 #include <vector>
+#include "paddle/phi/core/macros.h"
 namespace paddle {
 namespace distributed {
@@ -26,7 +27,7 @@ class GraphEdgeBlob {
   size_t size() { return id_arr.size(); }
   virtual void add_edge(int64_t id, float weight);
   int64_t get_id(int idx) { return id_arr[idx]; }
-  virtual float get_weight(int idx) { return 1; }
+  virtual float get_weight(int idx UNUSED) { return 1; }
   std::vector<int64_t>& export_id_array() { return id_arr; }
  protected:
......
@@ -38,31 +38,35 @@ class Node {
   int64_t get_py_id() { return (int64_t)id; }
   void set_id(uint64_t id) { this->id = id; }
-  virtual void build_edges(bool is_weighted) {}
-  virtual void build_sampler(std::string sample_type) {}
-  virtual void add_edge(uint64_t id, float weight) {}
+  virtual void build_edges(bool is_weighted UNUSED) {}
+  virtual void build_sampler(std::string sample_type UNUSED) {}
+  virtual void add_edge(uint64_t id UNUSED, float weight UNUSED) {}
   virtual std::vector<int> sample_k(
-      int k, const std::shared_ptr<std::mt19937_64> rng) {
+      int k UNUSED, const std::shared_ptr<std::mt19937_64> rng UNUSED) {
     return std::vector<int>();
   }
-  virtual uint64_t get_neighbor_id(int idx) { return 0; }
-  virtual float get_neighbor_weight(int idx) { return 1.; }
+  virtual uint64_t get_neighbor_id(int idx UNUSED) { return 0; }
+  virtual float get_neighbor_weight(int idx UNUSED) { return 1.; }
   virtual int get_size(bool need_feature);
   virtual void to_buffer(char *buffer, bool need_feature);
   virtual void recover_from_buffer(char *buffer);
-  virtual std::string get_feature(int idx) { return std::string(""); }
-  virtual int get_feature_ids(std::vector<uint64_t> *res) const { return 0; }
-  virtual int get_feature_ids(int slot_idx, std::vector<uint64_t> *res) const {
-    return 0;
-  }
-  virtual int get_feature_ids(int slot_idx,
-                              std::vector<uint64_t> &feature_id,  // NOLINT
-                              std::vector<uint8_t> &slot_id) const {  // NOLINT
+  virtual std::string get_feature(int idx UNUSED) { return std::string(""); }
+  virtual int get_feature_ids(std::vector<uint64_t> *res UNUSED) const {
+    return 0;
+  }
+  virtual int get_feature_ids(int slot_idx UNUSED,
+                              std::vector<uint64_t> *res UNUSED) const {
+    return 0;
+  }
+  virtual int get_feature_ids(
+      int slot_idx UNUSED,
+      std::vector<uint64_t> &feature_id UNUSED,  // NOLINT
+      std::vector<uint8_t> &slot_id UNUSED) const {  // NOLINT
     return 0;
   }
-  virtual void set_feature(int idx, const std::string &str) {}
-  virtual void set_feature_size(int size) {}
+  virtual void set_feature(int idx UNUSED, const std::string &str UNUSED) {}
+  virtual void set_feature_size(int size UNUSED) {}
   virtual void shrink_to_fit() {}
   virtual int get_feature_size() { return 0; }
   virtual size_t get_neighbor_size() { return 0; }
......
@@ -20,6 +20,7 @@
 // Phi deps
 #include "paddle/phi/api/include/tensor.h"
 #include "paddle/phi/core/compat/convert_utils.h"
+#include "paddle/phi/core/macros.h"
 namespace egr {
@@ -133,10 +134,10 @@ class VariableCompatTensor
   bool initialized() const override { return IsInitialized(); }
-  void* AllocateFrom(phi::Allocator* allocator,
-                     phi::DataType dtype,
-                     size_t requested_size = 0,
-                     bool fake_alloc = false) override {
+  void* AllocateFrom(phi::Allocator* allocator UNUSED,
+                     phi::DataType dtype UNUSED,
+                     size_t requested_size UNUSED = 0,
+                     bool fake_alloc UNUSED = false) override {
     PADDLE_THROW(paddle::platform::errors::Unavailable(
         "VariableCompatTensor does not support `AllocateFrom` method."));
   }
......
@@ -1135,10 +1135,10 @@ struct ElementwiseOp : public PatternBase {
 };
 struct MatmulElementwiseAdd : public PatternBase {
-  MatmulElementwiseAdd(PDPattern* pattern,
-                       const std::string& name_scope,
-                       const std::string& matmul_type,
-                       bool as_x)
+  MatmulElementwiseAdd(PDPattern* pattern UNUSED,
+                       const std::string& name_scope UNUSED,
+                       const std::string& matmul_type UNUSED,
+                       bool as_x UNUSED)
       : PatternBase(pattern, name_scope, "matmul_elementwise_add") {}
   PDNode* operator()(const std::string& matmul_type, bool as_x);
@@ -1155,7 +1155,7 @@ struct MatmulElementwiseAdd : public PatternBase {
 struct ResidualElementwise : public PatternBase {
   ResidualElementwise(PDPattern* pattern,
                       const std::string& name_scope,
-                      bool as_x)
+                      bool as_x UNUSED)
       : PatternBase(pattern, name_scope, "residual_elementwise") {}
   PDNode* operator()(PDNode* op_var,
                      PDNode* residual_var,
......
@@ -56,6 +56,7 @@
 #include <vector>
 #include "glog/logging.h"
+#include "paddle/phi/core/macros.h"
 namespace paddle {
 namespace framework {
@@ -242,7 +243,7 @@ class EventCount {
   Waiter* waiters_{nullptr};
   size_t waiter_num_{0};
-  static void CheckState(uint64_t state, bool waiter = false) {
+  static void CheckState(uint64_t state, bool waiter UNUSED = false) {
     static_assert(kEpochBits >= 20, "not enough bits to prevent ABA problem");
     const uint64_t waiters = (state & kWaiterMask) >> kWaiterShift;
     const uint64_t signals = (state & kSignalMask) >> kSignalShift;
......
@@ -116,7 +116,7 @@ struct Argument {
   } \
   void Set##Field##NotOwned(type__* x) { \
     valid_fields_.insert(#field__); \
-    field__##_ = unique_ptr_t(x, [](void* x) {}); \
+    field__##_ = unique_ptr_t(x, [](void* x UNUSED) {}); \
   } \
   DECL_ARGUMENT_FIELD_VALID(field__); \
   type__* field__##_ptr() { \
......
@@ -641,13 +641,13 @@ template <typename DeviceContext,
 void FusedElemwiseAndActGradComputeNoBroadcast(
     const framework::ExecutionContext &ctx,
     const framework::DDim &x_dim,
-    const framework::DDim &y_dim,
+    const framework::DDim &y_dim UNUSED,
     const phi::DenseTensor *x,
     const phi::DenseTensor *y,
    const phi::DenseTensor *intermediate_out,
     const phi::DenseTensor *out,
     const phi::DenseTensor *dout,
-    int axis,
+    int axis UNUSED,
     phi::DenseTensor *dx,
     phi::DenseTensor *dy,
     phi::DenseTensor *dintermediate,
......
@@ -844,7 +844,7 @@ void _sliceDapper(const phi::DenseTensor *in,
 template <typename T>
 inline phi::DenseTensor *_sliceWrapper(const phi::DenseTensor &self,
                                        const phi::CPUContext &ctx,
-                                       py::object obj,
+                                       py::object obj UNUSED,
                                        int dim,
                                        int64_t start,
                                        int64_t slicelength) {
......
@@ -349,7 +349,7 @@ class ScopedDropoutDescriptor {
   }
   inline cudnnDropoutDescriptor_t descriptor(const cudnnHandle_t& handle,
-                                             const phi::Place& place,
+                                             const phi::Place& place UNUSED,
                                              bool initialized,
                                              float dropout_prob_,
                                              phi::DenseTensor* dropout_state_,
......
@@ -282,8 +282,8 @@ inline std::string ThreadIDasStr(void) {
       std::hash<std::thread::id>()(std::this_thread::get_id()));
 }
-inline std::string ExtendKeyWithThreadInfoIfNeeded(const OneDNNContext& dev_ctx,
-                                                   const std::string& key) {
+inline std::string ExtendKeyWithThreadInfoIfNeeded(
+    const OneDNNContext& dev_ctx UNUSED, const std::string& key) {
   return (OneDNNContext::tls().is_tid_used_in_key() == true)
              ? key + "-t:" + ThreadIDasStr()
              : key;
......
@@ -382,12 +382,12 @@ template <typename DeviceContext,
           typename Tout = T>
 void ElemwiseGradComputeNoBroadcast(const DeviceContext &dev_ctx,
                                     const DDim &x_dim,
-                                    const DDim &y_dim,
+                                    const DDim &y_dim UNUSED,
                                     const DenseTensor &x,
                                     const DenseTensor &y,
                                     const DenseTensor &out,
                                     const DenseTensor &dout,
-                                    int axis,
+                                    int axis UNUSED,
                                     DenseTensor *dx,
                                     DenseTensor *dy,
                                     DX_OP dx_op,
......
@@ -16,6 +16,8 @@ limitations under the License. */
 #include "glog/logging.h"
+#include "paddle/phi/core/macros.h"
 namespace phi {
 namespace funcs {
@@ -53,11 +55,11 @@ struct cpu_gather_scatter_functor {
   template <typename func_t>
   void operator()(phi::DenseTensor self,
                   int dim,
-                  const phi::DenseTensor& index,
+                  const phi::DenseTensor& index UNUSED,
                   const phi::DenseTensor& src,
-                  const std::string& method_name,
+                  const std::string& method_name UNUSED,
                   const func_t& reduce_op,
-                  const phi::DeviceContext& ctx) {
+                  const phi::DeviceContext& ctx UNUSED) {
     if (index.numel() == 0) {
       return;
     }
......
@@ -361,7 +361,8 @@ template <typename KernelTuple>
 class ReferKernel : public KernelMore<KernelTuple> {
  public:
   // Refer code can always be used
-  bool CanBeUsed(const typename KernelTuple::attr_type& attr) const override {
+  bool CanBeUsed(
+      const typename KernelTuple::attr_type& attr UNUSED) const override {
     return true;
   }
   const char* ImplType() const override { return "Refer"; }
......
@@ -43,7 +43,7 @@ struct JitKernelRegistrarFunctor;
 template <typename Pool, typename PlaceType, size_t I, typename... KernelImpls>
 struct JitKernelRegistrarFunctor<Pool, PlaceType, true, I, KernelImpls...> {
-  void operator()(KernelType kt) const {}
+  void operator()(KernelType kt UNUSED) const {}
 };
 template <typename Pool, typename PlaceType, size_t I, typename... KernelImpls>
......
@@ -14,13 +14,13 @@
 #pragma once
+#include "paddle/phi/core/macros.h"
 #include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/kernels/funcs/eigen/common.h"
 #include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 #include "paddle/phi/kernels/funcs/slice_utils.h"
 #include "paddle/phi/kernels/slice_grad_kernel.h"
 namespace phi {
 template <typename T, typename Context, size_t D>
@@ -212,8 +212,8 @@ void SliceGradCompute(const Context& ctx,
                       const DenseTensor& out_grad,
                       const std::vector<int64_t>& axes,
                       const std::vector<int64_t>& starts,
-                      const std::vector<int64_t>& ends,
-                      const std::vector<int64_t>& infer_flags,
+                      const std::vector<int64_t>& ends UNUSED,
+                      const std::vector<int64_t>& infer_flags UNUSED,
                       const std::vector<int64_t>& decrease_axis,
                       DenseTensor* input_grad) {
   auto* d_out = &out_grad;
......