Unverified commit 63ffd733 authored by Galaxy1458, committed by GitHub

test,test=develop (#53818)

Parent 39f365c4
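This change annotates parameters that are intentionally left unread with the UNUSED macro, so builds that enable -Wunused-parameter (for example via -Wall -Wextra) stay warning-free while no-op default implementations and stub kernels keep their full interface signatures. The sketch below shows the general pattern on a trimmed-down IndexSampler; the macro definition and the simplified class are illustrative assumptions, not Paddle's exact headers.

```cpp
#include <cstdint>
#include <vector>

// Assumption: Paddle defines UNUSED in one of its macro headers; a common
// GCC/Clang-compatible definition looks like this.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// Trimmed-down illustration: the default implementation ignores every
// argument, so each parameter is tagged UNUSED to silence the warning.
class IndexSampler {
 public:
  virtual ~IndexSampler() = default;
  virtual void init_layerwise_conf(
      const std::vector<uint16_t>& layer_sample_counts UNUSED,
      uint16_t start_sample_layer UNUSED = 1,
      uint16_t seed UNUSED = 0) {}
  virtual void init_beamsearch_conf(int64_t k UNUSED) {}
};

int main() {
  IndexSampler sampler;                    // base class provides no-op defaults
  sampler.init_layerwise_conf({1, 2, 4});  // compiles warning-free with -Wall -Wextra
  sampler.init_beamsearch_conf(8);
  return 0;
}
```

On compilers with C++17 support, [[maybe_unused]] placed before the parameter type is the standard-attribute equivalent of this vendor-specific annotation.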
@@ -37,10 +37,10 @@ class IndexSampler {
}
virtual void init_layerwise_conf(
const std::vector<uint16_t>& layer_sample_counts,
uint16_t start_sample_layer = 1,
uint16_t seed = 0) {}
virtual void init_beamsearch_conf(const int64_t k) {}
const std::vector<uint16_t>& layer_sample_counts UNUSED,
uint16_t start_sample_layer UNUSED = 1,
uint16_t seed UNUSED = 0) {}
virtual void init_beamsearch_conf(const int64_t k UNUSED) {}
virtual std::vector<std::vector<uint64_t>> sample(
const std::vector<std::vector<uint64_t>>& user_inputs,
const std::vector<uint64_t>& input_targets,
......
@@ -26,9 +26,10 @@ class PsLocalClient : public PSClient {
public:
PsLocalClient() {}
virtual ~PsLocalClient() { _running = false; }
virtual int32_t CreateClient2ClientConnection(int pslib_timeout_ms,
int pslib_connect_timeout_ms,
int max_retry) {
virtual int32_t CreateClient2ClientConnection(int pslib_timeout_ms UNUSED,
int pslib_connect_timeout_ms
UNUSED,
int max_retry UNUSED) {
return 0;
}
@@ -64,11 +65,11 @@ class PsLocalClient : public PSClient {
size_t region_num,
size_t table_id);
virtual ::std::future<int32_t> PullSparse(float** select_values,
size_t table_id,
const uint64_t* keys,
size_t num,
bool is_training) {
virtual ::std::future<int32_t> PullSparse(float** select_values UNUSED,
size_t table_id UNUSED,
const uint64_t* keys UNUSED,
size_t num UNUSED,
bool is_training UNUSED) {
std::promise<int32_t> prom;
std::future<int32_t> fut = prom.get_future();
prom.set_value(0);
@@ -113,7 +114,8 @@ class PsLocalClient : public PSClient {
return fut;
}
virtual std::future<int32_t> Barrier(size_t table_id, uint32_t barrier_type) {
virtual std::future<int32_t> Barrier(size_t table_id UNUSED,
uint32_t barrier_type UNUSED) {
std::promise<int32_t> prom;
std::future<int32_t> fut = prom.get_future();
prom.set_value(0);
@@ -121,10 +123,10 @@ class PsLocalClient : public PSClient {
return fut;
}
virtual std::future<int32_t> PullGeoParam(size_t table_id,
std::vector<float>* values,
std::vector<uint64_t>* keys,
int pserver_idx) {
virtual std::future<int32_t> PullGeoParam(size_t table_id UNUSED,
std::vector<float>* values UNUSED,
std::vector<uint64_t>* keys UNUSED,
int pserver_idx UNUSED) {
std::promise<int32_t> prom;
std::future<int32_t> fut = prom.get_future();
prom.set_value(0);
@@ -132,9 +134,9 @@ class PsLocalClient : public PSClient {
return fut;
}
virtual std::future<int32_t> PushGlobalStep(int table_id,
int64_t* total_send_data,
void* done) {
virtual std::future<int32_t> PushGlobalStep(int table_id UNUSED,
int64_t* total_send_data UNUSED,
void* done UNUSED) {
std::promise<int32_t> prom;
std::future<int32_t> fut = prom.get_future();
prom.set_value(0);
@@ -143,14 +145,15 @@ class PsLocalClient : public PSClient {
}
// recv table from server and save it in LodTensor
virtual int32_t RecvAndSaveTable(const uint64_t table_id,
const std::string& path) {
virtual int32_t RecvAndSaveTable(const uint64_t table_id UNUSED,
const std::string& path UNUSED) {
return 0;
}
virtual ::std::future<int32_t> SendClient2ClientMsg(int msg_type,
int to_client_id,
const std::string& msg) {
virtual ::std::future<int32_t> SendClient2ClientMsg(int msg_type UNUSED,
int to_client_id UNUSED,
const std::string& msg
UNUSED) {
std::promise<int32_t> prom;
std::future<int32_t> fut = prom.get_future();
prom.set_value(0);
@@ -172,12 +175,12 @@ class PsLocalClient : public PSClient {
void* callback);
virtual std::future<int32_t> PushSparseRawGradientPartial(
size_t table_id,
const uint64_t* keys,
const float** update_values,
uint32_t num,
void* done,
int pserver_idx) {
size_t table_id UNUSED,
const uint64_t* keys UNUSED,
const float** update_values UNUSED,
uint32_t num UNUSED,
void* done UNUSED,
int pserver_idx UNUSED) {
std::promise<int32_t> prom;
std::future<int32_t> fut = prom.get_future();
prom.set_value(0);
@@ -185,11 +188,12 @@ class PsLocalClient : public PSClient {
return fut;
}
virtual std::future<int32_t> PushSparseParam(size_t table_id,
const uint64_t* keys,
const float** update_values,
size_t num,
void* done) {
virtual std::future<int32_t> PushSparseParam(size_t table_id UNUSED,
const uint64_t* keys UNUSED,
const float** update_values
UNUSED,
size_t num UNUSED,
void* done UNUSED) {
std::promise<int32_t> prom;
std::future<int32_t> fut = prom.get_future();
prom.set_value(0);
......
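The PsLocalClient stubs above all return an already-satisfied future: a std::promise is fulfilled before its future is handed back, so callers written against the asynchronous PSClient interface can still call .get() or .wait() without ever blocking. A self-contained sketch of that idiom follows (PushNoop is a hypothetical name, not part of the actual client):

```cpp
#include <cstdint>
#include <future>
#include <iostream>

// Synchronous stub behind an asynchronous interface: the promise is fulfilled
// before the future is returned, so fut.get() never blocks.
std::future<int32_t> PushNoop() {
  std::promise<int32_t> prom;
  std::future<int32_t> fut = prom.get_future();
  prom.set_value(0);  // report success immediately
  return fut;
}

int main() {
  std::future<int32_t> fut = PushNoop();
  std::cout << "status: " << fut.get() << std::endl;  // prints "status: 0"
  return 0;
}
```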
@@ -65,8 +65,8 @@ inline std::vector<paddle::Tensor> EagerAmpAutoCasts(
const std::string& inputs_name,
const std::vector<paddle::Tensor>& inputs,
const phi::DataType& dst_dtype,
std::string op_name,
bool trace_backward = true) {
std::string op_name UNUSED,
bool trace_backward UNUSED = true) {
VLOG(6) << "AMP AmpAutoCasts:"
<< " inputs(" << inputs_name << ") dst_dtype("
<< phi::DataTypeToString(dst_dtype) << ").";
......
@@ -115,7 +115,7 @@ class EagerLayoutTransformer {
explicit EagerLayoutTransformer(
const std::string& op_name,
const paddle::small_vector<std::vector<paddle::Tensor>,
kSlotSmallVectorSize>& tensors_vector,
kSlotSmallVectorSize>& tensors_vector UNUSED,
const Layout final_layout = Layout::UNDEFINED)
: op_name_(op_name), final_layout_(final_layout), dim_size_(1) {
VLOG(4) << "Agnostic op : " << op_name_ << "'s layout is " << final_layout_;
@@ -123,7 +123,7 @@ class EagerLayoutTransformer {
virtual ~EagerLayoutTransformer() {}
virtual paddle::Tensor TransInTensor(const std::string& in_name,
virtual paddle::Tensor TransInTensor(const std::string& in_name UNUSED,
const paddle::Tensor& in) {
// update in shape size
dim_size_ = in.shape().size();
@@ -146,7 +146,8 @@ class EagerLayoutTransformer {
}
virtual std::vector<paddle::Tensor> TransInTensors(
const std::string& in_name, const std::vector<paddle::Tensor>& in) {
const std::string& in_name UNUSED,
const std::vector<paddle::Tensor>& in) {
return in;
}
@@ -168,12 +169,12 @@ class EagerLayoutTransformer {
}
virtual void SetOutTensorLayout(
paddle::optional<paddle::Tensor>* out_tensor) {
paddle::optional<paddle::Tensor>* out_tensor UNUSED) {
VLOG(4) << "AutoTune out tensor is optional";
}
virtual void SetOutTensorLayout(
paddle::optional<std::vector<paddle::Tensor>>* out_tensor) {
paddle::optional<std::vector<paddle::Tensor>>* out_tensor UNUSED) {
VLOG(4) << "AutoTune out tensor is optional";
}
@@ -250,7 +251,7 @@ class EagerLightlyLayoutSensitiveOpTransformer : public EagerLayoutTransformer {
}
// transpose from desired to default
paddle::Tensor TransInTensor(const std::string& in_name,
paddle::Tensor TransInTensor(const std::string& in_name UNUSED,
const paddle::Tensor& in) {
std::string input_layout = phi::DataLayoutToString(in.layout());
auto default_layout = DefaultLayout();
@@ -265,7 +266,8 @@ class EagerLightlyLayoutSensitiveOpTransformer : public EagerLayoutTransformer {
}
virtual std::vector<paddle::Tensor> TransInTensors(
const std::string& in_name, const std::vector<paddle::Tensor>& in) {
const std::string& in_name UNUSED,
const std::vector<paddle::Tensor>& in) {
std::vector<paddle::Tensor> result;
auto desired_layout = DesiredLayout();
auto default_layout = DefaultLayout();
@@ -327,7 +329,7 @@ class EagerTransposeOpTransformer
(*axis)[3] = perm[(*axis)[3]];
}
paddle::Tensor TransInTensor(const std::string& in_name,
paddle::Tensor TransInTensor(const std::string& in_name UNUSED,
const paddle::Tensor& in) {
return in;
}
@@ -367,7 +369,7 @@ class EagerFlattenOpTransformer
}
// transpose from NHWC to NCHW
paddle::Tensor TransInTensor(const std::string& in_name,
paddle::Tensor TransInTensor(const std::string& in_name UNUSED,
const paddle::Tensor& in) {
return in;
}
@@ -395,7 +397,8 @@ class EagerConcatOpTransformer
}
virtual std::vector<paddle::Tensor> TransInTensors(
const std::string& in_name, const std::vector<paddle::Tensor>& in) {
const std::string& in_name UNUSED,
const std::vector<paddle::Tensor>& in) {
return in;
}
......
@@ -21,14 +21,14 @@ namespace phi {
template <typename T, typename Context>
void Pool2dGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& out,
const DenseTensor& out UNUSED,
const DenseTensor& dout,
const IntArray& kernel_size,
const std::vector<int>& strides,
const std::vector<int>& paddings,
bool ceil_mode,
bool exclusive,
const std::string& data_format,
const std::string& data_format UNUSED,
const std::string& pooling_type,
bool global_pooling,
bool adaptive,
......
@@ -23,7 +23,7 @@ void SumRawKernel(const Context& dev_ctx,
const IntArray& dims,
bool keep_dim,
bool reduce_all,
DataType out_dtype,
DataType out_dtype UNUSED,
DenseTensor* out) {
reduce_all = recompute_reduce_all(x, dims, reduce_all);
ReduceKernel<T, Context>(dev_ctx,
......
@@ -25,10 +25,10 @@ void SGDDenseKernel(const Context& dev_ctx,
const DenseTensor& param,
const DenseTensor& learning_rate,
const DenseTensor& grad,
const paddle::optional<DenseTensor>& master_param,
bool multi_precision,
const paddle::optional<DenseTensor>& master_param UNUSED,
bool multi_precision UNUSED,
DenseTensor* param_out,
DenseTensor* master_param_out) {
DenseTensor* master_param_out UNUSED) {
auto* out_data = dev_ctx.template Alloc<T>(param_out);
const T* param_data = param.data<T>();
const auto* grad_data = grad.data<T>();
@@ -43,13 +43,13 @@ void SGDDenseKernel(const Context& dev_ctx,
template <typename T, typename Context>
void SGDDenseParamSparseGradKernel(
const Context& dev_ctx,
const DenseTensor& param,
const DenseTensor& param UNUSED,
const DenseTensor& learning_rate,
const SelectedRows& grad,
const paddle::optional<DenseTensor>& master_param,
bool multi_precision,
const paddle::optional<DenseTensor>& master_param UNUSED,
bool multi_precision UNUSED,
DenseTensor* param_out,
DenseTensor* master_param_out) {
DenseTensor* master_param_out UNUSED) {
const auto& grad_value = grad.value();
const auto& grad_rows = grad.rows();
const auto grad_height = grad.height();
......
@@ -21,13 +21,13 @@ namespace phi {
template <typename T, typename Context>
void SliceGradKernel(const Context& dev_ctx,
const DenseTensor& input,
const DenseTensor& input UNUSED,
const DenseTensor& out_grad,
const std::vector<int64_t>& axes,
const IntArray& starts,
const IntArray& ends,
const std::vector<int64_t>& infer_flags,
const std::vector<int64_t>& decrease_axis,
const std::vector<int64_t>& infer_flags UNUSED,
const std::vector<int64_t>& decrease_axis UNUSED,
DenseTensor* input_grad) {
const auto& onednn_engine = dev_ctx.GetEngine();
......
@@ -23,7 +23,7 @@ template <typename T, typename Context>
void SoftplusKernel(const Context& dev_ctx,
const DenseTensor& x,
float beta,
float threshold,
float threshold UNUSED,
DenseTensor* out) {
funcs::SoftplusOneDNNHandler<T> handler(dev_ctx, &x, beta);
......
@@ -23,7 +23,7 @@ template <typename T, typename Context>
void SqueezeGradKernel(const Context& dev_ctx,
const DenseTensor& xshape,
const DenseTensor& dout,
const IntArray& axes,
const IntArray& axes UNUSED,
DenseTensor* dx) {
auto dout_vec_dims = dout.dims().size() != 0 ? vectorize(dout.dims())
: std::vector<int64_t>{1};
......
@@ -39,21 +39,21 @@ void AdamDenseParamSparseGradKernel(
const DenseTensor& moment2,
const DenseTensor& beta1_pow,
const DenseTensor& beta2_pow,
const paddle::optional<DenseTensor>& master_param,
const paddle::optional<DenseTensor>& master_param UNUSED,
const paddle::optional<DenseTensor>& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
bool lazy_mode,
int64_t min_row_size_to_use_multithread,
bool multi_precision,
bool multi_precision UNUSED,
bool use_global_beta_pow,
DenseTensor* param_out,
DenseTensor* moment1_out,
DenseTensor* moment2_out,
DenseTensor* beta1_pow_out,
DenseTensor* beta2_pow_out,
DenseTensor* master_param_outs) {
DenseTensor* master_param_outs UNUSED) {
VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow;
bool skip_update_ = false;
......
@@ -21,31 +21,31 @@ namespace phi {
namespace sparse {
template <typename T, typename Context>
void AddmmCooDenseGradKernel(const Context& dev_ctx,
const DenseTensor& input,
const SparseCooTensor& x,
const DenseTensor& y,
const DenseTensor& dout,
float alpha,
float beta,
DenseTensor* dinput,
SparseCooTensor* dx,
DenseTensor* dy) {
void AddmmCooDenseGradKernel(const Context& dev_ctx UNUSED,
const DenseTensor& input UNUSED,
const SparseCooTensor& x UNUSED,
const DenseTensor& y UNUSED,
const DenseTensor& dout UNUSED,
float alpha UNUSED,
float beta UNUSED,
DenseTensor* dinput UNUSED,
SparseCooTensor* dx UNUSED,
DenseTensor* dy UNUSED) {
PADDLE_THROW(phi::errors::Unimplemented(
"Not support CPU backward kernel of 'sparse.addmm' now."));
}
template <typename T, typename Context>
void AddmmCsrDenseGradKernel(const Context& dev_ctx,
const DenseTensor& input,
const SparseCsrTensor& x,
const DenseTensor& y,
const DenseTensor& dout,
float alpha,
float beta,
DenseTensor* dinput,
SparseCsrTensor* dx,
DenseTensor* dy) {
void AddmmCsrDenseGradKernel(const Context& dev_ctx UNUSED,
const DenseTensor& input UNUSED,
const SparseCsrTensor& x UNUSED,
const DenseTensor& y UNUSED,
const DenseTensor& dout UNUSED,
float alpha UNUSED,
float beta UNUSED,
DenseTensor* dinput UNUSED,
SparseCsrTensor* dx UNUSED,
DenseTensor* dy UNUSED) {
PADDLE_THROW(phi::errors::Unimplemented(
"Not support CPU backward kernel of 'sparse.addmm' now."));
}
......
@@ -21,26 +21,26 @@ namespace sparse {
/* DENSE + COO @ DENSE -> DENSE */
template <typename T, typename Context>
void AddmmCooDenseKernel(const Context& dev_ctx,
const DenseTensor& input,
const SparseCooTensor& x,
const DenseTensor& y,
float beta,
float alpha,
DenseTensor* out) {
void AddmmCooDenseKernel(const Context& dev_ctx UNUSED,
const DenseTensor& input UNUSED,
const SparseCooTensor& x UNUSED,
const DenseTensor& y UNUSED,
float beta UNUSED,
float alpha UNUSED,
DenseTensor* out UNUSED) {
PADDLE_THROW(phi::errors::Unimplemented(
"Not support CPU kernel of 'sparse.addmm' now."));
}
/* DENSE + CSR @ DENSE -> DENSE */
template <typename T, typename Context>
void AddmmCsrDenseKernel(const Context& dev_ctx,
const DenseTensor& input,
const SparseCsrTensor& x,
const DenseTensor& y,
float beta,
float alpha,
DenseTensor* out) {
void AddmmCsrDenseKernel(const Context& dev_ctx UNUSED,
const DenseTensor& input UNUSED,
const SparseCsrTensor& x UNUSED,
const DenseTensor& y UNUSED,
float beta UNUSED,
float alpha UNUSED,
DenseTensor* out UNUSED) {
PADDLE_THROW(phi::errors::Unimplemented(
"Not support CPU kernel of 'sparse.addmm' now."));
}
......
@@ -22,24 +22,24 @@ namespace sparse {
// TODO(zhouwei25): implement CPU backward kernel of " CSR @ DENSE -> DENSE"
template <typename T, typename Context>
void MatmulCsrDenseGradKernel(const Context& dev_ctx,
const SparseCsrTensor& x,
const DenseTensor& y,
const DenseTensor& dout,
SparseCsrTensor* dx,
DenseTensor* dy) {
void MatmulCsrDenseGradKernel(const Context& dev_ctx UNUSED,
const SparseCsrTensor& x UNUSED,
const DenseTensor& y UNUSED,
const DenseTensor& dout UNUSED,
SparseCsrTensor* dx UNUSED,
DenseTensor* dy UNUSED) {
PADDLE_THROW(phi::errors::Unimplemented(
"Not support CPU backward kernel of 'sparse.matmul' now."));
}
// TODO(zhouwei25): implement CPU kernel of " DENSE @ DENSE * CSR_MASK -> CSR"
template <typename T, typename Context>
void MaskedMatmulCsrGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
const SparseCsrTensor& dout,
DenseTensor* dx,
DenseTensor* dy) {
void MaskedMatmulCsrGradKernel(const Context& dev_ctx UNUSED,
const DenseTensor& x UNUSED,
const DenseTensor& y UNUSED,
const SparseCsrTensor& dout UNUSED,
DenseTensor* dx UNUSED,
DenseTensor* dy UNUSED) {
PADDLE_THROW(phi::errors::Unimplemented(
"Not support CPU backward kernel of 'sparse.masked_matmul' now."));
}
......
@@ -22,21 +22,21 @@ namespace sparse {
// TODO(zhouwei25): implement CPU kernel of " CSR @ DENSE -> DENSE"
template <typename T, typename Context>
void MatmulCsrDenseKernel(const Context& dev_ctx,
const SparseCsrTensor& x,
const DenseTensor& y,
DenseTensor* out) {
void MatmulCsrDenseKernel(const Context& dev_ctx UNUSED,
const SparseCsrTensor& x UNUSED,
const DenseTensor& y UNUSED,
DenseTensor* out UNUSED) {
PADDLE_THROW(phi::errors::Unimplemented(
"Not support CPU kernel of 'sparse.matmul' now."));
}
// TODO(zhouwei25): implement CPU kernel of " DENSE @ DENSE * CSR_MASK -> CSR"
template <typename T, typename Context>
void MaskedMatmulCsrKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
const SparseCsrTensor& mask,
SparseCsrTensor* out) {
void MaskedMatmulCsrKernel(const Context& dev_ctx UNUSED,
const DenseTensor& x UNUSED,
const DenseTensor& y UNUSED,
const SparseCsrTensor& mask UNUSED,
SparseCsrTensor* out UNUSED) {
PADDLE_THROW(phi::errors::Unimplemented(
"Not support CPU kernel of 'sparse.masked_matmul' now."));
}
......
@@ -21,23 +21,23 @@ namespace phi {
namespace sparse {
template <typename T, typename Context>
void MvCooGradKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& vec,
const DenseTensor& dout,
SparseCooTensor* dx,
DenseTensor* dvec) {
void MvCooGradKernel(const Context& dev_ctx UNUSED,
const SparseCooTensor& x UNUSED,
const DenseTensor& vec UNUSED,
const DenseTensor& dout UNUSED,
SparseCooTensor* dx UNUSED,
DenseTensor* dvec UNUSED) {
PADDLE_THROW(phi::errors::Unimplemented(
"Not support CPU backward kernel of 'sparse.mv' now."));
}
template <typename T, typename Context>
void MvCsrGradKernel(const Context& dev_ctx,
const SparseCsrTensor& x,
const DenseTensor& vec,
const DenseTensor& dout,
SparseCsrTensor* dx,
DenseTensor* dvec) {
void MvCsrGradKernel(const Context& dev_ctx UNUSED,
const SparseCsrTensor& x UNUSED,
const DenseTensor& vec UNUSED,
const DenseTensor& dout UNUSED,
SparseCsrTensor* dx UNUSED,
DenseTensor* dvec UNUSED) {
PADDLE_THROW(phi::errors::Unimplemented(
"Not support CPU backward kernel of 'sparse.mv' now."));
}
......
@@ -21,19 +21,19 @@ namespace phi {
namespace sparse {
template <typename T, typename Context>
void MvCsrKernel(const Context& dev_ctx,
const SparseCsrTensor& x,
const DenseTensor& vec,
DenseTensor* out) {
void MvCsrKernel(const Context& dev_ctx UNUSED,
const SparseCsrTensor& x UNUSED,
const DenseTensor& vec UNUSED,
DenseTensor* out UNUSED) {
PADDLE_THROW(
phi::errors::Unimplemented("Not support CPU kernel of 'sparse.mv' now."));
}
template <typename T, typename Context>
void MvCooKernel(const Context& dev_ctx,
const SparseCooTensor& x,
const DenseTensor& vec,
DenseTensor* out) {
void MvCooKernel(const Context& dev_ctx UNUSED,
const SparseCooTensor& x UNUSED,
const DenseTensor& vec UNUSED,
DenseTensor* out UNUSED) {
PADDLE_THROW(
phi::errors::Unimplemented("Not support CPU kernel of 'sparse.mv' now."));
}
......
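In the sparse CPU kernels above, the bodies only raise phi::errors::Unimplemented, so every parameter, including dev_ctx, carries UNUSED. A minimal sketch of that stub style under the same assumed UNUSED definition (the placeholder type and plain exception here are illustrative, not phi's tensor or error machinery):

```cpp
#include <stdexcept>

#define UNUSED __attribute__((unused))  // assumption: GCC/Clang-style expansion

struct DenseTensor {};  // placeholder stand-in for the sketch, not phi's type

// Unimplemented placeholder: the signature must match the kernel interface,
// but the body reads nothing, so every parameter is tagged UNUSED.
void MvCooKernel(const DenseTensor& x UNUSED,
                 const DenseTensor& vec UNUSED,
                 DenseTensor* out UNUSED) {
  throw std::runtime_error("Not support CPU kernel of 'sparse.mv' now.");
}

int main() {
  DenseTensor x, vec, out;
  try {
    MvCooKernel(x, vec, &out);
  } catch (const std::runtime_error&) {
    // always lands here: the stub only reports that the kernel is missing
  }
  return 0;
}
```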
@@ -81,7 +81,7 @@ struct StringCaseConvertKernel {
template <typename DeviceContext, typename CharConverter>
struct AsciiCaseConverter {
void operator()(const DeviceContext& dev_ctx,
void operator()(const DeviceContext& dev_ctx UNUSED,
const pstring* in,
pstring* out,
size_t num) const {
@@ -96,7 +96,7 @@ template <typename DeviceContext,
template <typename DeviceContextT>
class CharConverter>
struct UTF8CaseConverter {
void operator()(const DeviceContext& dev_ctx,
void operator()(const DeviceContext& dev_ctx UNUSED,
const pstring* in,
pstring* out,
size_t num) const {
......
@@ -18,7 +18,7 @@ namespace phi {
#define DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(func_name, op_name, attrs) \
KernelSignature func_name##GradOpArgumentMapping( \
const ArgumentMappingContext& ctx) { \
const ArgumentMappingContext& ctx UNUSED) { \
return KernelSignature( \
op_name "_grad", {"X", "Out@GRAD"}, {attrs}, {"X@GRAD"}); \
}
......
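The final hunk applies the same annotation inside a code-generating macro: every ArgumentMapping function it stamps out ignores its ctx argument, so the UNUSED tag is written once in the macro body rather than at each expansion site. A standalone sketch of that pattern with hypothetical names (not phi's actual KernelSignature API):

```cpp
#include <iostream>
#include <string>

#define UNUSED __attribute__((unused))  // as sketched above

// The generated functions never read ctx, so the macro tags it once here
// instead of at every expansion site.
#define DEFINE_ACT_GRAD_ARGMAP(func_name, op_name)                 \
  std::string func_name##GradOpArgumentMapping(int ctx UNUSED) {   \
    return std::string(op_name) + "_grad";                         \
  }

DEFINE_ACT_GRAD_ARGMAP(Relu, "relu")

int main() {
  std::cout << ReluGradOpArgumentMapping(/*ctx=*/0) << std::endl;  // relu_grad
  return 0;
}
```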