Unverified commit 63ffd733, authored by Galaxy1458 and committed by GitHub

test,test=develop (#53818)

Parent 39f365c4
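This commit annotates parameters that are kept only for interface compatibility with the UNUSED macro, so unused-parameter warnings are silenced without changing behavior. For orientation, here is a minimal sketch of how such a macro is commonly defined and applied; the exact Paddle definition lives in its own macro headers and may differ, and the names below are illustrative:

// Illustrative sketch only, not the verbatim Paddle header.
// On GCC/Clang the attribute marks a parameter as intentionally unused;
// elsewhere the macro expands to nothing.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// The attribute sits after the parameter name and before any default
// argument, matching the placement used throughout this diff.
inline int Stub(int used, int reserved UNUSED = 0) { return used; }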
@@ -37,10 +37,10 @@ class IndexSampler {
   }
   virtual void init_layerwise_conf(
-      const std::vector<uint16_t>& layer_sample_counts,
-      uint16_t start_sample_layer = 1,
-      uint16_t seed = 0) {}
-  virtual void init_beamsearch_conf(const int64_t k) {}
+      const std::vector<uint16_t>& layer_sample_counts UNUSED,
+      uint16_t start_sample_layer UNUSED = 1,
+      uint16_t seed UNUSED = 0) {}
+  virtual void init_beamsearch_conf(const int64_t k UNUSED) {}
   virtual std::vector<std::vector<uint64_t>> sample(
       const std::vector<std::vector<uint64_t>>& user_inputs,
       const std::vector<uint64_t>& input_targets,
...
@@ -26,9 +26,10 @@ class PsLocalClient : public PSClient {
  public:
   PsLocalClient() {}
   virtual ~PsLocalClient() { _running = false; }
-  virtual int32_t CreateClient2ClientConnection(int pslib_timeout_ms,
-                                                int pslib_connect_timeout_ms,
-                                                int max_retry) {
+  virtual int32_t CreateClient2ClientConnection(int pslib_timeout_ms UNUSED,
+                                                int pslib_connect_timeout_ms
+                                                    UNUSED,
+                                                int max_retry UNUSED) {
     return 0;
   }
@@ -64,11 +65,11 @@ class PsLocalClient : public PSClient {
                                      size_t region_num,
                                      size_t table_id);
-  virtual ::std::future<int32_t> PullSparse(float** select_values,
-                                            size_t table_id,
-                                            const uint64_t* keys,
-                                            size_t num,
-                                            bool is_training) {
+  virtual ::std::future<int32_t> PullSparse(float** select_values UNUSED,
+                                            size_t table_id UNUSED,
+                                            const uint64_t* keys UNUSED,
+                                            size_t num UNUSED,
+                                            bool is_training UNUSED) {
     std::promise<int32_t> prom;
     std::future<int32_t> fut = prom.get_future();
     prom.set_value(0);
@@ -113,7 +114,8 @@ class PsLocalClient : public PSClient {
     return fut;
   }
-  virtual std::future<int32_t> Barrier(size_t table_id, uint32_t barrier_type) {
+  virtual std::future<int32_t> Barrier(size_t table_id UNUSED,
+                                       uint32_t barrier_type UNUSED) {
     std::promise<int32_t> prom;
     std::future<int32_t> fut = prom.get_future();
     prom.set_value(0);
@@ -121,10 +123,10 @@ class PsLocalClient : public PSClient {
     return fut;
   }
-  virtual std::future<int32_t> PullGeoParam(size_t table_id,
-                                            std::vector<float>* values,
-                                            std::vector<uint64_t>* keys,
-                                            int pserver_idx) {
+  virtual std::future<int32_t> PullGeoParam(size_t table_id UNUSED,
+                                            std::vector<float>* values UNUSED,
+                                            std::vector<uint64_t>* keys UNUSED,
+                                            int pserver_idx UNUSED) {
     std::promise<int32_t> prom;
     std::future<int32_t> fut = prom.get_future();
     prom.set_value(0);
@@ -132,9 +134,9 @@ class PsLocalClient : public PSClient {
     return fut;
   }
-  virtual std::future<int32_t> PushGlobalStep(int table_id,
-                                              int64_t* total_send_data,
-                                              void* done) {
+  virtual std::future<int32_t> PushGlobalStep(int table_id UNUSED,
+                                              int64_t* total_send_data UNUSED,
+                                              void* done UNUSED) {
     std::promise<int32_t> prom;
     std::future<int32_t> fut = prom.get_future();
     prom.set_value(0);
@@ -143,14 +145,15 @@ class PsLocalClient : public PSClient {
   }
   // recv table from server and save it in LodTensor
-  virtual int32_t RecvAndSaveTable(const uint64_t table_id,
-                                   const std::string& path) {
+  virtual int32_t RecvAndSaveTable(const uint64_t table_id UNUSED,
+                                   const std::string& path UNUSED) {
     return 0;
   }
-  virtual ::std::future<int32_t> SendClient2ClientMsg(int msg_type,
-                                                      int to_client_id,
-                                                      const std::string& msg) {
+  virtual ::std::future<int32_t> SendClient2ClientMsg(int msg_type UNUSED,
+                                                      int to_client_id UNUSED,
+                                                      const std::string& msg
+                                                          UNUSED) {
     std::promise<int32_t> prom;
     std::future<int32_t> fut = prom.get_future();
     prom.set_value(0);
@@ -172,12 +175,12 @@ class PsLocalClient : public PSClient {
                                              void* callback);
   virtual std::future<int32_t> PushSparseRawGradientPartial(
-      size_t table_id,
-      const uint64_t* keys,
-      const float** update_values,
-      uint32_t num,
-      void* done,
-      int pserver_idx) {
+      size_t table_id UNUSED,
+      const uint64_t* keys UNUSED,
+      const float** update_values UNUSED,
+      uint32_t num UNUSED,
+      void* done UNUSED,
+      int pserver_idx UNUSED) {
     std::promise<int32_t> prom;
     std::future<int32_t> fut = prom.get_future();
     prom.set_value(0);
@@ -185,11 +188,12 @@ class PsLocalClient : public PSClient {
     return fut;
   }
-  virtual std::future<int32_t> PushSparseParam(size_t table_id,
-                                               const uint64_t* keys,
-                                               const float** update_values,
-                                               size_t num,
-                                               void* done) {
+  virtual std::future<int32_t> PushSparseParam(size_t table_id UNUSED,
+                                               const uint64_t* keys UNUSED,
+                                               const float** update_values
+                                                   UNUSED,
+                                               size_t num UNUSED,
+                                               void* done UNUSED) {
     std::promise<int32_t> prom;
     std::future<int32_t> fut = prom.get_future();
     prom.set_value(0);
...
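The PsLocalClient stubs above all follow the same idiom: they satisfy the asynchronous PSClient interface by returning a std::future that is already resolved to 0, so callers that wait on the result proceed immediately. A self-contained sketch of that pattern, with illustrative names:

#include <cstdint>
#include <future>

// Build an already-completed future: the promise is fulfilled before the
// future is handed back, so .get()/.wait() never blocks.
std::future<int32_t> ReadyFuture(int32_t value = 0) {
  std::promise<int32_t> prom;
  std::future<int32_t> fut = prom.get_future();
  prom.set_value(value);  // complete the shared state up front
  return fut;
}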
@@ -65,8 +65,8 @@ inline std::vector<paddle::Tensor> EagerAmpAutoCasts(
     const std::string& inputs_name,
     const std::vector<paddle::Tensor>& inputs,
     const phi::DataType& dst_dtype,
-    std::string op_name,
-    bool trace_backward = true) {
+    std::string op_name UNUSED,
+    bool trace_backward UNUSED = true) {
   VLOG(6) << "AMP AmpAutoCasts:"
           << " inputs(" << inputs_name << ") dst_dtype("
           << phi::DataTypeToString(dst_dtype) << ").";
...
@@ -115,7 +115,7 @@ class EagerLayoutTransformer {
   explicit EagerLayoutTransformer(
       const std::string& op_name,
       const paddle::small_vector<std::vector<paddle::Tensor>,
-                                 kSlotSmallVectorSize>& tensors_vector,
+                                 kSlotSmallVectorSize>& tensors_vector UNUSED,
       const Layout final_layout = Layout::UNDEFINED)
       : op_name_(op_name), final_layout_(final_layout), dim_size_(1) {
     VLOG(4) << "Agnostic op : " << op_name_ << "'s layout is " << final_layout_;
@@ -123,7 +123,7 @@ class EagerLayoutTransformer {
   virtual ~EagerLayoutTransformer() {}
-  virtual paddle::Tensor TransInTensor(const std::string& in_name,
+  virtual paddle::Tensor TransInTensor(const std::string& in_name UNUSED,
                                        const paddle::Tensor& in) {
     // update in shape size
     dim_size_ = in.shape().size();
@@ -146,7 +146,8 @@ class EagerLayoutTransformer {
   }
   virtual std::vector<paddle::Tensor> TransInTensors(
-      const std::string& in_name, const std::vector<paddle::Tensor>& in) {
+      const std::string& in_name UNUSED,
+      const std::vector<paddle::Tensor>& in) {
     return in;
   }
@@ -168,12 +169,12 @@ class EagerLayoutTransformer {
   }
   virtual void SetOutTensorLayout(
-      paddle::optional<paddle::Tensor>* out_tensor) {
+      paddle::optional<paddle::Tensor>* out_tensor UNUSED) {
     VLOG(4) << "AutoTune out tensor is optional";
   }
   virtual void SetOutTensorLayout(
-      paddle::optional<std::vector<paddle::Tensor>>* out_tensor) {
+      paddle::optional<std::vector<paddle::Tensor>>* out_tensor UNUSED) {
     VLOG(4) << "AutoTune out tensor is optional";
   }
@@ -250,7 +251,7 @@ class EagerLightlyLayoutSensitiveOpTransformer : public EagerLayoutTransformer {
   }
   // transpose from desired to default
-  paddle::Tensor TransInTensor(const std::string& in_name,
+  paddle::Tensor TransInTensor(const std::string& in_name UNUSED,
                                const paddle::Tensor& in) {
     std::string input_layout = phi::DataLayoutToString(in.layout());
     auto default_layout = DefaultLayout();
@@ -265,7 +266,8 @@ class EagerLightlyLayoutSensitiveOpTransformer : public EagerLayoutTransformer {
   }
   virtual std::vector<paddle::Tensor> TransInTensors(
-      const std::string& in_name, const std::vector<paddle::Tensor>& in) {
+      const std::string& in_name UNUSED,
+      const std::vector<paddle::Tensor>& in) {
     std::vector<paddle::Tensor> result;
     auto desired_layout = DesiredLayout();
     auto default_layout = DefaultLayout();
@@ -327,7 +329,7 @@ class EagerTransposeOpTransformer
     (*axis)[3] = perm[(*axis)[3]];
   }
-  paddle::Tensor TransInTensor(const std::string& in_name,
+  paddle::Tensor TransInTensor(const std::string& in_name UNUSED,
                                const paddle::Tensor& in) {
     return in;
   }
@@ -367,7 +369,7 @@ class EagerFlattenOpTransformer
   }
   // transpose from NHWC to NCHW
-  paddle::Tensor TransInTensor(const std::string& in_name,
+  paddle::Tensor TransInTensor(const std::string& in_name UNUSED,
                                const paddle::Tensor& in) {
     return in;
   }
@@ -395,7 +397,8 @@ class EagerConcatOpTransformer
   }
   virtual std::vector<paddle::Tensor> TransInTensors(
-      const std::string& in_name, const std::vector<paddle::Tensor>& in) {
+      const std::string& in_name UNUSED,
+      const std::vector<paddle::Tensor>& in) {
     return in;
   }
...
@@ -21,14 +21,14 @@ namespace phi {
 template <typename T, typename Context>
 void Pool2dGradKernel(const Context& dev_ctx,
                       const DenseTensor& x,
-                      const DenseTensor& out,
+                      const DenseTensor& out UNUSED,
                       const DenseTensor& dout,
                       const IntArray& kernel_size,
                       const std::vector<int>& strides,
                       const std::vector<int>& paddings,
                       bool ceil_mode,
                       bool exclusive,
-                      const std::string& data_format,
+                      const std::string& data_format UNUSED,
                       const std::string& pooling_type,
                       bool global_pooling,
                       bool adaptive,
...
@@ -23,7 +23,7 @@ void SumRawKernel(const Context& dev_ctx,
                   const IntArray& dims,
                   bool keep_dim,
                   bool reduce_all,
-                  DataType out_dtype,
+                  DataType out_dtype UNUSED,
                   DenseTensor* out) {
   reduce_all = recompute_reduce_all(x, dims, reduce_all);
   ReduceKernel<T, Context>(dev_ctx,
...
@@ -25,10 +25,10 @@ void SGDDenseKernel(const Context& dev_ctx,
                     const DenseTensor& param,
                     const DenseTensor& learning_rate,
                     const DenseTensor& grad,
-                    const paddle::optional<DenseTensor>& master_param,
-                    bool multi_precision,
+                    const paddle::optional<DenseTensor>& master_param UNUSED,
+                    bool multi_precision UNUSED,
                     DenseTensor* param_out,
-                    DenseTensor* master_param_out) {
+                    DenseTensor* master_param_out UNUSED) {
   auto* out_data = dev_ctx.template Alloc<T>(param_out);
   const T* param_data = param.data<T>();
   const auto* grad_data = grad.data<T>();
@@ -43,13 +43,13 @@ void SGDDenseKernel(const Context& dev_ctx,
 template <typename T, typename Context>
 void SGDDenseParamSparseGradKernel(
     const Context& dev_ctx,
-    const DenseTensor& param,
+    const DenseTensor& param UNUSED,
     const DenseTensor& learning_rate,
     const SelectedRows& grad,
-    const paddle::optional<DenseTensor>& master_param,
-    bool multi_precision,
+    const paddle::optional<DenseTensor>& master_param UNUSED,
+    bool multi_precision UNUSED,
     DenseTensor* param_out,
-    DenseTensor* master_param_out) {
+    DenseTensor* master_param_out UNUSED) {
   const auto& grad_value = grad.value();
   const auto& grad_rows = grad.rows();
   const auto grad_height = grad.height();
...
@@ -21,13 +21,13 @@ namespace phi {
 template <typename T, typename Context>
 void SliceGradKernel(const Context& dev_ctx,
-                     const DenseTensor& input,
+                     const DenseTensor& input UNUSED,
                      const DenseTensor& out_grad,
                      const std::vector<int64_t>& axes,
                      const IntArray& starts,
                      const IntArray& ends,
-                     const std::vector<int64_t>& infer_flags,
-                     const std::vector<int64_t>& decrease_axis,
+                     const std::vector<int64_t>& infer_flags UNUSED,
+                     const std::vector<int64_t>& decrease_axis UNUSED,
                      DenseTensor* input_grad) {
   const auto& onednn_engine = dev_ctx.GetEngine();
...
@@ -23,7 +23,7 @@ template <typename T, typename Context>
 void SoftplusKernel(const Context& dev_ctx,
                     const DenseTensor& x,
                     float beta,
-                    float threshold,
+                    float threshold UNUSED,
                     DenseTensor* out) {
   funcs::SoftplusOneDNNHandler<T> handler(dev_ctx, &x, beta);
...
@@ -23,7 +23,7 @@ template <typename T, typename Context>
 void SqueezeGradKernel(const Context& dev_ctx,
                        const DenseTensor& xshape,
                        const DenseTensor& dout,
-                       const IntArray& axes,
+                       const IntArray& axes UNUSED,
                        DenseTensor* dx) {
   auto dout_vec_dims = dout.dims().size() != 0 ? vectorize(dout.dims())
                                                : std::vector<int64_t>{1};
...
@@ -39,21 +39,21 @@ void AdamDenseParamSparseGradKernel(
     const DenseTensor& moment2,
     const DenseTensor& beta1_pow,
     const DenseTensor& beta2_pow,
-    const paddle::optional<DenseTensor>& master_param,
+    const paddle::optional<DenseTensor>& master_param UNUSED,
     const paddle::optional<DenseTensor>& skip_update,
     const Scalar& beta1,
     const Scalar& beta2,
     const Scalar& epsilon,
     bool lazy_mode,
     int64_t min_row_size_to_use_multithread,
-    bool multi_precision,
+    bool multi_precision UNUSED,
     bool use_global_beta_pow,
     DenseTensor* param_out,
     DenseTensor* moment1_out,
     DenseTensor* moment2_out,
     DenseTensor* beta1_pow_out,
     DenseTensor* beta2_pow_out,
-    DenseTensor* master_param_outs) {
+    DenseTensor* master_param_outs UNUSED) {
   VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow;
   bool skip_update_ = false;
...
@@ -21,31 +21,31 @@ namespace phi {
 namespace sparse {
 template <typename T, typename Context>
-void AddmmCooDenseGradKernel(const Context& dev_ctx,
-                             const DenseTensor& input,
-                             const SparseCooTensor& x,
-                             const DenseTensor& y,
-                             const DenseTensor& dout,
-                             float alpha,
-                             float beta,
-                             DenseTensor* dinput,
-                             SparseCooTensor* dx,
-                             DenseTensor* dy) {
+void AddmmCooDenseGradKernel(const Context& dev_ctx UNUSED,
+                             const DenseTensor& input UNUSED,
+                             const SparseCooTensor& x UNUSED,
+                             const DenseTensor& y UNUSED,
+                             const DenseTensor& dout UNUSED,
+                             float alpha UNUSED,
+                             float beta UNUSED,
+                             DenseTensor* dinput UNUSED,
+                             SparseCooTensor* dx UNUSED,
+                             DenseTensor* dy UNUSED) {
   PADDLE_THROW(phi::errors::Unimplemented(
       "Not support CPU backward kernel of 'sparse.addmm' now."));
 }
 template <typename T, typename Context>
-void AddmmCsrDenseGradKernel(const Context& dev_ctx,
-                             const DenseTensor& input,
-                             const SparseCsrTensor& x,
-                             const DenseTensor& y,
-                             const DenseTensor& dout,
-                             float alpha,
-                             float beta,
-                             DenseTensor* dinput,
-                             SparseCsrTensor* dx,
-                             DenseTensor* dy) {
+void AddmmCsrDenseGradKernel(const Context& dev_ctx UNUSED,
+                             const DenseTensor& input UNUSED,
+                             const SparseCsrTensor& x UNUSED,
+                             const DenseTensor& y UNUSED,
+                             const DenseTensor& dout UNUSED,
+                             float alpha UNUSED,
+                             float beta UNUSED,
+                             DenseTensor* dinput UNUSED,
+                             SparseCsrTensor* dx UNUSED,
+                             DenseTensor* dy UNUSED) {
   PADDLE_THROW(phi::errors::Unimplemented(
       "Not support CPU backward kernel of 'sparse.addmm' now."));
 }
...
@@ -21,26 +21,26 @@ namespace sparse {
 /* DENSE + COO @ DENSE -> DENSE */
 template <typename T, typename Context>
-void AddmmCooDenseKernel(const Context& dev_ctx,
-                         const DenseTensor& input,
-                         const SparseCooTensor& x,
-                         const DenseTensor& y,
-                         float beta,
-                         float alpha,
-                         DenseTensor* out) {
+void AddmmCooDenseKernel(const Context& dev_ctx UNUSED,
+                         const DenseTensor& input UNUSED,
+                         const SparseCooTensor& x UNUSED,
+                         const DenseTensor& y UNUSED,
+                         float beta UNUSED,
+                         float alpha UNUSED,
+                         DenseTensor* out UNUSED) {
   PADDLE_THROW(phi::errors::Unimplemented(
       "Not support CPU kernel of 'sparse.addmm' now."));
 }
 /* DENSE + CSR @ DENSE -> DENSE */
 template <typename T, typename Context>
-void AddmmCsrDenseKernel(const Context& dev_ctx,
-                         const DenseTensor& input,
-                         const SparseCsrTensor& x,
-                         const DenseTensor& y,
-                         float beta,
-                         float alpha,
-                         DenseTensor* out) {
+void AddmmCsrDenseKernel(const Context& dev_ctx UNUSED,
+                         const DenseTensor& input UNUSED,
+                         const SparseCsrTensor& x UNUSED,
+                         const DenseTensor& y UNUSED,
+                         float beta UNUSED,
+                         float alpha UNUSED,
+                         DenseTensor* out UNUSED) {
   PADDLE_THROW(phi::errors::Unimplemented(
       "Not support CPU kernel of 'sparse.addmm' now."));
 }
...
@@ -22,24 +22,24 @@ namespace sparse {
 // TODO(zhouwei25): implement CPU backward kernel of " CSR @ DENSE -> DENSE"
 template <typename T, typename Context>
-void MatmulCsrDenseGradKernel(const Context& dev_ctx,
-                              const SparseCsrTensor& x,
-                              const DenseTensor& y,
-                              const DenseTensor& dout,
-                              SparseCsrTensor* dx,
-                              DenseTensor* dy) {
+void MatmulCsrDenseGradKernel(const Context& dev_ctx UNUSED,
+                              const SparseCsrTensor& x UNUSED,
+                              const DenseTensor& y UNUSED,
+                              const DenseTensor& dout UNUSED,
+                              SparseCsrTensor* dx UNUSED,
+                              DenseTensor* dy UNUSED) {
   PADDLE_THROW(phi::errors::Unimplemented(
       "Not support CPU backward kernel of 'sparse.matmul' now."));
 }
 // TODO(zhouwei25): implement CPU kernel of " DENSE @ DENSE * CSR_MASK -> CSR"
 template <typename T, typename Context>
-void MaskedMatmulCsrGradKernel(const Context& dev_ctx,
-                               const DenseTensor& x,
-                               const DenseTensor& y,
-                               const SparseCsrTensor& dout,
-                               DenseTensor* dx,
-                               DenseTensor* dy) {
+void MaskedMatmulCsrGradKernel(const Context& dev_ctx UNUSED,
+                               const DenseTensor& x UNUSED,
+                               const DenseTensor& y UNUSED,
+                               const SparseCsrTensor& dout UNUSED,
+                               DenseTensor* dx UNUSED,
+                               DenseTensor* dy UNUSED) {
   PADDLE_THROW(phi::errors::Unimplemented(
      "Not support CPU backward kernel of 'sparse.masked_matmul' now."));
 }
...
@@ -22,21 +22,21 @@ namespace sparse {
 // TODO(zhouwei25): implement CPU kernel of " CSR @ DENSE -> DENSE"
 template <typename T, typename Context>
-void MatmulCsrDenseKernel(const Context& dev_ctx,
-                          const SparseCsrTensor& x,
-                          const DenseTensor& y,
-                          DenseTensor* out) {
+void MatmulCsrDenseKernel(const Context& dev_ctx UNUSED,
+                          const SparseCsrTensor& x UNUSED,
+                          const DenseTensor& y UNUSED,
+                          DenseTensor* out UNUSED) {
   PADDLE_THROW(phi::errors::Unimplemented(
       "Not support CPU kernel of 'sparse.matmul' now."));
 }
 // TODO(zhouwei25): implement CPU kernel of " DENSE @ DENSE * CSR_MASK -> CSR"
 template <typename T, typename Context>
-void MaskedMatmulCsrKernel(const Context& dev_ctx,
-                           const DenseTensor& x,
-                           const DenseTensor& y,
-                           const SparseCsrTensor& mask,
-                           SparseCsrTensor* out) {
+void MaskedMatmulCsrKernel(const Context& dev_ctx UNUSED,
+                           const DenseTensor& x UNUSED,
+                           const DenseTensor& y UNUSED,
+                           const SparseCsrTensor& mask UNUSED,
+                           SparseCsrTensor* out UNUSED) {
   PADDLE_THROW(phi::errors::Unimplemented(
       "Not support CPU kernel of 'sparse.masked_matmul' now."));
 }
...
@@ -21,23 +21,23 @@ namespace phi {
 namespace sparse {
 template <typename T, typename Context>
-void MvCooGradKernel(const Context& dev_ctx,
-                     const SparseCooTensor& x,
-                     const DenseTensor& vec,
-                     const DenseTensor& dout,
-                     SparseCooTensor* dx,
-                     DenseTensor* dvec) {
+void MvCooGradKernel(const Context& dev_ctx UNUSED,
+                     const SparseCooTensor& x UNUSED,
+                     const DenseTensor& vec UNUSED,
+                     const DenseTensor& dout UNUSED,
+                     SparseCooTensor* dx UNUSED,
+                     DenseTensor* dvec UNUSED) {
   PADDLE_THROW(phi::errors::Unimplemented(
       "Not support CPU backward kernel of 'sparse.mv' now."));
 }
 template <typename T, typename Context>
-void MvCsrGradKernel(const Context& dev_ctx,
-                     const SparseCsrTensor& x,
-                     const DenseTensor& vec,
-                     const DenseTensor& dout,
-                     SparseCsrTensor* dx,
-                     DenseTensor* dvec) {
+void MvCsrGradKernel(const Context& dev_ctx UNUSED,
+                     const SparseCsrTensor& x UNUSED,
+                     const DenseTensor& vec UNUSED,
+                     const DenseTensor& dout UNUSED,
+                     SparseCsrTensor* dx UNUSED,
+                     DenseTensor* dvec UNUSED) {
   PADDLE_THROW(phi::errors::Unimplemented(
       "Not support CPU backward kernel of 'sparse.mv' now."));
 }
...
@@ -21,19 +21,19 @@ namespace phi {
 namespace sparse {
 template <typename T, typename Context>
-void MvCsrKernel(const Context& dev_ctx,
-                 const SparseCsrTensor& x,
-                 const DenseTensor& vec,
-                 DenseTensor* out) {
+void MvCsrKernel(const Context& dev_ctx UNUSED,
+                 const SparseCsrTensor& x UNUSED,
+                 const DenseTensor& vec UNUSED,
+                 DenseTensor* out UNUSED) {
   PADDLE_THROW(
       phi::errors::Unimplemented("Not support CPU kernel of 'sparse.mv' now."));
 }
 template <typename T, typename Context>
-void MvCooKernel(const Context& dev_ctx,
-                 const SparseCooTensor& x,
-                 const DenseTensor& vec,
-                 DenseTensor* out) {
+void MvCooKernel(const Context& dev_ctx UNUSED,
+                 const SparseCooTensor& x UNUSED,
+                 const DenseTensor& vec UNUSED,
+                 DenseTensor* out UNUSED) {
   PADDLE_THROW(
       phi::errors::Unimplemented("Not support CPU kernel of 'sparse.mv' now."));
 }
...
@@ -81,7 +81,7 @@ struct StringCaseConvertKernel {
 template <typename DeviceContext, typename CharConverter>
 struct AsciiCaseConverter {
-  void operator()(const DeviceContext& dev_ctx,
+  void operator()(const DeviceContext& dev_ctx UNUSED,
                   const pstring* in,
                   pstring* out,
                   size_t num) const {
@@ -96,7 +96,7 @@ template <typename DeviceContext,
           template <typename DeviceContextT>
           class CharConverter>
 struct UTF8CaseConverter {
-  void operator()(const DeviceContext& dev_ctx,
+  void operator()(const DeviceContext& dev_ctx UNUSED,
                   const pstring* in,
                   pstring* out,
                   size_t num) const {
...
@@ -18,7 +18,7 @@ namespace phi {
 #define DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(func_name, op_name, attrs) \
   KernelSignature func_name##GradOpArgumentMapping(               \
-      const ArgumentMappingContext& ctx) {                         \
+      const ArgumentMappingContext& ctx UNUSED) {                  \
     return KernelSignature(                                        \
         op_name "_grad", {"X", "Out@GRAD"}, {attrs}, {"X@GRAD"});  \
   }
...
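For reference, the DEFINE_ACT_GRAD_DEPX_OP_ARGMAP macro above generates an argument-mapping function per activation op. A hypothetical instantiation such as DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(Foo, "foo", ) with no extra attributes would expand to roughly the following; the op name "foo" is illustrative and not part of this commit:

// Sketch of the macro expansion with an empty attrs list.
KernelSignature FooGradOpArgumentMapping(
    const ArgumentMappingContext& ctx UNUSED) {
  // Maps the "foo_grad" kernel: inputs X and Out@GRAD, no attributes,
  // output X@GRAD.
  return KernelSignature("foo_grad", {"X", "Out@GRAD"}, {}, {"X@GRAD"});
}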