Unverified commit 16fcbb9b, authored by Galaxy1458, committed by GitHub

test,test=develop (#53851)

Parent 2f42cb7f
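This commit sweeps a set of phi CPU kernels and marks parameters that the kernel bodies never read with an UNUSED annotation, keeping the registered signatures intact while silencing unused-parameter warnings. The macro itself is defined in Paddle's utility headers, not in this diff; as a minimal sketch (assuming a GCC/Clang-compatible toolchain, and with DoubleInPlace as a made-up example rather than a Paddle kernel), it is the usual compiler-attribute wrapper:

// Minimal sketch of an UNUSED-style parameter annotation; Paddle's real
// definition lives in its utility headers and may differ in detail.
#if defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif

// Applied to a parameter, it marks the non-use as intentional, so a build
// with -Wall -Wextra (which enables -Wunused-parameter) stays warning-free:
void DoubleInPlace(float* data, int n, float epsilon UNUSED) {
  for (int i = 0; i < n; ++i) {
    data[i] *= 2.0f;  // epsilon is deliberately ignored
  }
}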
@@ -47,7 +47,7 @@ void InstanceNormGradKernel(const Context& dev_ctx,
                             const DenseTensor& saved_mean,
                             const DenseTensor& saved_variance,
                             const DenseTensor& d_y,
-                            float epsilon,
+                            float epsilon UNUSED,
                             DenseTensor* d_x,
                             DenseTensor* d_scale,
                             DenseTensor* d_bias) {
@@ -160,7 +160,7 @@ void InstanceNormDoubleGradKernel(const Context& dev_ctx,
                                   const paddle::optional<DenseTensor>& ddx,
                                   const paddle::optional<DenseTensor>& ddscale,
                                   const paddle::optional<DenseTensor>& ddbias,
-                                  float epsilon,
+                                  float epsilon UNUSED,
                                   DenseTensor* dx,
                                   DenseTensor* dscale,
                                   DenseTensor* ddy) {
......
@@ -30,7 +30,7 @@ template <typename T, typename Context>
 void LayerNormGradKernel(const Context& dev_ctx,
                          const DenseTensor& x,
                          const paddle::optional<DenseTensor>& scale_opt,
-                         const paddle::optional<DenseTensor>& bias_opt,
+                         const paddle::optional<DenseTensor>& bias_opt UNUSED,
                          const DenseTensor& mean,
                          const DenseTensor& variance,
                          const DenseTensor& out_grad,
......
@@ -73,7 +73,7 @@ struct decay_score<T, true> {
 template <typename T>
 struct decay_score<T, false> {
-  T operator()(T iou, T max_iou, T sigma) {
+  T operator()(T iou, T max_iou, T sigma UNUSED) {
     return (1. - iou) / (1. - max_iou);
   }
 };
......
@@ -29,7 +29,7 @@ class Point_ {
   // default constructor
   Point_() {}
   Point_(T _x, T _y) {}
-  Point_(const Point_& pt) {}
+  Point_(const Point_& pt UNUSED) {}
   Point_& operator=(const Point_& pt);
   // conversion to another data type
@@ -116,7 +116,7 @@ T GetContourArea(const std::vector<Point_<T>>& vec) {
 }
 template <class T>
-T PolyArea(const T* box, const size_t box_size, const bool normalized) {
+T PolyArea(const T* box, const size_t box_size, const bool normalized UNUSED) {
   // If coordinate values are invalid
   // if area size <= 0, return 0.
   std::vector<Point_<T>> vec;
@@ -128,7 +128,7 @@ template <class T>
 T PolyOverlapArea(const T* box1,
                   const T* box2,
                   const size_t box_size,
-                  const bool normalized) {
+                  const bool normalized UNUSED) {
   phi::funcs::gpc_polygon poly1;
   phi::funcs::gpc_polygon poly2;
   Array2Poly<T>(box1, box_size, &poly1);
......
@@ -28,8 +28,8 @@ void NormGradKernel(const Context& ctx,
                     const DenseTensor& norm,
                     const DenseTensor& out_grad,
                     int axis,
-                    float epsilon,
-                    bool is_test,
+                    float epsilon UNUSED,
+                    bool is_test UNUSED,
                     DenseTensor* x_grad) {
   auto* in_x = &x;
   auto* in_dy = &out_grad;
......
@@ -51,7 +51,7 @@ void PNormGradKernel(const Context& dev_ctx,
                      float porder,
                      int axis,
                      float epsilon,
-                     bool keepdim,
+                     bool keepdim UNUSED,
                      bool asvector,
                      DenseTensor* x_grad) {
   auto* in_x = &x;
......
@@ -49,8 +49,8 @@ void PNormKernel(const Context& dev_ctx,
                  const DenseTensor& x,
                  float porder,
                  int axis,
-                 float epsilon,
-                 bool keepdim,
+                 float epsilon UNUSED,
+                 bool keepdim UNUSED,
                  bool asvector,
                  DenseTensor* out) {
   auto* in_x = &x;
......
@@ -26,7 +26,7 @@ void ConstPad3DGradNCDHW(T* d_in_data,
                          const int in_depth,
                          const int in_height,
                          const int in_width,
-                         const int out_depth,
+                         const int out_depth UNUSED,
                          const int out_height,
                          const int out_width,
                          const int pad_front,
@@ -52,7 +52,7 @@ void ConstPad3DGradNDHWC(T* d_in_data,
                          const int in_depth,
                          const int in_height,
                          const int in_width,
-                         const int out_depth,
+                         const int out_depth UNUSED,
                          const int out_height,
                          const int out_width,
                          const int pad_front,
@@ -83,7 +83,7 @@ void ReflectPad3DGradNCDHW(T* d_in_data,
                            const int in_depth,
                            const int in_height,
                            const int in_width,
-                           const int out_depth,
+                           const int out_depth UNUSED,
                            const int out_height,
                            const int out_width,
                            const int pad_front,
@@ -114,7 +114,7 @@ void ReflectPad3DGradNDHWC(T* d_in_data,
                            const int in_depth,
                            const int in_height,
                            const int in_width,
-                           const int out_depth,
+                           const int out_depth UNUSED,
                            const int out_height,
                            const int out_width,
                            const int pad_front,
@@ -149,7 +149,7 @@ void ReplicatePad3DGradNCDHW(T* d_in_data,
                              const int in_depth,
                              const int in_height,
                              const int in_width,
-                             const int out_depth,
+                             const int out_depth UNUSED,
                              const int out_height,
                              const int out_width,
                              const int pad_front,
@@ -173,7 +173,7 @@ void ReplicatePad3DGradNDHWC(T* d_in_data,
                              const int in_depth,
                              const int in_height,
                              const int in_width,
-                             const int out_depth,
+                             const int out_depth UNUSED,
                              const int out_height,
                              const int out_width,
                              const int pad_front,
@@ -201,7 +201,7 @@ void CircularPad3DGradNCDHW(T* d_in_data,
                             const int in_depth,
                             const int in_height,
                             const int in_width,
-                            const int out_depth,
+                            const int out_depth UNUSED,
                             const int out_height,
                             const int out_width,
                             const int pad_front,
@@ -224,7 +224,7 @@ void CircularPad3DGradNDHWC(T* d_in_data,
                             const int in_depth,
                             const int in_height,
                             const int in_width,
-                            const int out_depth,
+                            const int out_depth UNUSED,
                             const int out_height,
                             const int out_width,
                             const int pad_front,
@@ -360,11 +360,11 @@ void Pad3DGradNDHWC(T* d_in_data,
 template <typename T, typename Context>
 void Pad3dGradKernel(const Context& dev_ctx,
-                     const DenseTensor& x,
+                     const DenseTensor& x UNUSED,
                      const DenseTensor& out_grad,
                      const IntArray& paddings,
                      const std::string& mode,
-                     float pad_value,
+                     float pad_value UNUSED,
                      const std::string& data_format,
                      DenseTensor* x_grad) {
   std::vector<int64_t> pads = paddings.GetData();
......
@@ -26,7 +26,7 @@ void RandintKernel(const Context& dev_ctx,
                    int low,
                    int high,
                    const IntArray& shape,
-                   DataType dtype,
+                   DataType dtype UNUSED,
                    DenseTensor* out) {
   int seed = 0;
   out->Resize(phi::make_ddim(shape.GetData()));
......
@@ -27,17 +27,17 @@ struct RmsFunctor<T, phi::CPUContext> {
              const DenseTensor &moment,
              const DenseTensor &learning_rate,
              const paddle::optional<DenseTensor> &mean_grad_opt,
-             const paddle::optional<DenseTensor> &master_param,
+             const paddle::optional<DenseTensor> &master_param UNUSED,
              float epsilon_t,
              float decay_t,
              float momentum_t,
              bool centered,
-             bool multi_precision,
+             bool multi_precision UNUSED,
              DenseTensor *param_out,
              DenseTensor *moment_out,
              DenseTensor *mean_square_out,
              DenseTensor *mean_grad_out,
-             DenseTensor *master_param_outs) {
+             DenseTensor *master_param_outs UNUSED) {
     auto epsilon = static_cast<T>(epsilon_t);
     auto rho = static_cast<T>(decay_t);
     auto momentum = static_cast<T>(momentum_t);
......
@@ -168,11 +168,11 @@ void DropoutCpuFunctionInplace(const CPUContext& dev_ctx,
 }
 template <typename Context, typename TensorType>
-void SplitReserveData(const Context& dev_ctx,
-                      int direction_num,
-                      int time_step,
-                      int batch_size,
-                      int hidden_size,
+void SplitReserveData(const Context& dev_ctx UNUSED,
+                      int direction_num UNUSED,
+                      int time_step UNUSED,
+                      int batch_size UNUSED,
+                      int hidden_size UNUSED,
                       int gate_num,
                       int num_layers,
                       const std::string& mode,
......
@@ -54,23 +54,23 @@ void CreateLstmGrad(phi::funcs::LstmMetaGrad<T>* lstm_grad) {
 template <typename T>
 struct GradCell {
   virtual ~GradCell() {}
-  virtual void operator()(const CPUContext& dev_ctx,
-                          DenseTensor* gate_tensor,
-                          DenseTensor* state_tensor,
-                          DenseTensor* act_state_tensor,
-                          DenseTensor* hidden_tensor,
-                          const DenseTensor* weight_hh,
-                          DenseTensor* pre_hidden,
-                          DenseTensor* pre_state,
-                          DenseTensor* grad_hidden,
-                          DenseTensor* grad_state,
-                          DenseTensor* grad_gate,
-                          DenseTensor* grad_weight_hh,
-                          DenseTensor* grad_pre_hidden,
-                          DenseTensor* grad_pre_state,
-                          DenseTensor* grad_bias_hh,
-                          const DenseTensor& mask_tensor,
-                          bool has_sequence_length) const {}
+  virtual void operator()(const CPUContext& dev_ctx UNUSED,
+                          DenseTensor* gate_tensor UNUSED,
+                          DenseTensor* state_tensor UNUSED,
+                          DenseTensor* act_state_tensor UNUSED,
+                          DenseTensor* hidden_tensor UNUSED,
+                          const DenseTensor* weight_hh UNUSED,
+                          DenseTensor* pre_hidden UNUSED,
+                          DenseTensor* pre_state UNUSED,
+                          DenseTensor* grad_hidden UNUSED,
+                          DenseTensor* grad_state UNUSED,
+                          DenseTensor* grad_gate UNUSED,
+                          DenseTensor* grad_weight_hh UNUSED,
+                          DenseTensor* grad_pre_hidden UNUSED,
+                          DenseTensor* grad_pre_state UNUSED,
+                          DenseTensor* grad_bias_hh UNUSED,
+                          const DenseTensor& mask_tensor UNUSED,
+                          bool has_sequence_length UNUSED) const {}
   void postprocess_pre_hidden_grad(const CPUContext& dev_ctx,
                                    DenseTensor* grad_pre_hidden,
@@ -218,17 +218,17 @@ struct GRUGradCell : GradCell<T> {
   void operator()(const CPUContext& dev_ctx,
                   DenseTensor* gate_tensor,
                   DenseTensor* state_tensor,
-                  DenseTensor* act_state_tensor,
-                  DenseTensor* hidden_tensor,
+                  DenseTensor* act_state_tensor UNUSED,
+                  DenseTensor* hidden_tensor UNUSED,
                   const DenseTensor* weight_hh,
                   DenseTensor* pre_hidden,
-                  DenseTensor* pre_state,
+                  DenseTensor* pre_state UNUSED,
                   DenseTensor* grad_hidden,
                   DenseTensor* grad_state,
                   DenseTensor* grad_gate,
                   DenseTensor* grad_weight_hh,
                   DenseTensor* grad_pre_hidden,
-                  DenseTensor* grad_pre_state,
+                  DenseTensor* grad_pre_state UNUSED,
                   DenseTensor* grad_bias_hh,
                   const DenseTensor& mask_tensor,
                   bool has_sequence_length) const override {
@@ -284,7 +284,7 @@ struct LSTMGradCell : GradCell<T> {
                   DenseTensor* gate_tensor,
                   DenseTensor* state_tensor,
                   DenseTensor* act_state_tensor,
-                  DenseTensor* hidden_tensor,
+                  DenseTensor* hidden_tensor UNUSED,
                   const DenseTensor* weight_hh,
                   DenseTensor* pre_hidden,
                   DenseTensor* pre_state,
@@ -294,7 +294,7 @@ struct LSTMGradCell : GradCell<T> {
                   DenseTensor* grad_weight_hh,
                   DenseTensor* grad_pre_hidden,
                   DenseTensor* grad_pre_state,
-                  DenseTensor* grad_bias_hh,
+                  DenseTensor* grad_bias_hh UNUSED,
                   const DenseTensor& mask_tensor,
                   bool has_sequence_length) const override {
     size_t frame_size = state_tensor->dims()[2];
......
@@ -35,17 +35,17 @@ namespace phi {
 template <typename T>
 struct Cell {
   virtual ~Cell() {}
-  virtual void operator()(const CPUContext* dev_ctx,
-                          DenseTensor* input,
-                          const DenseTensor* weight_hh,
-                          const DenseTensor* init_h,
-                          const DenseTensor* init_c,
-                          DenseTensor* last_h,
-                          DenseTensor* last_c,
-                          DenseTensor* last_c_act,
-                          DenseTensor* output,
-                          const DenseTensor* bias_hh,
-                          DenseTensor* weight_hh_gru) const {}
+  virtual void operator()(const CPUContext* dev_ctx UNUSED,
+                          DenseTensor* input UNUSED,
+                          const DenseTensor* weight_hh UNUSED,
+                          const DenseTensor* init_h UNUSED,
+                          const DenseTensor* init_c UNUSED,
+                          DenseTensor* last_h UNUSED,
+                          DenseTensor* last_c UNUSED,
+                          DenseTensor* last_c_act UNUSED,
+                          DenseTensor* output UNUSED,
+                          const DenseTensor* bias_hh UNUSED,
+                          DenseTensor* weight_hh_gru UNUSED) const {}
 };
 template <typename T,
@@ -96,10 +96,10 @@ struct GRUCell : Cell<T> {
                   DenseTensor* input,
                   const DenseTensor* weight_hh,
                   const DenseTensor* init_h,
-                  const DenseTensor* init_c,
-                  DenseTensor* last_h,
+                  const DenseTensor* init_c UNUSED,
+                  DenseTensor* last_h UNUSED,
                   DenseTensor* last_c,
-                  DenseTensor* last_c_act,
+                  DenseTensor* last_c_act UNUSED,
                   DenseTensor* output,
                   const DenseTensor* bias_hh,
                   DenseTensor* weight_hh_gru) const override {
@@ -146,12 +146,12 @@ struct LSTMCell : Cell<T> {
                   const DenseTensor* weight_hh,
                   const DenseTensor* init_h,
                   const DenseTensor* init_c,
-                  DenseTensor* last_h,
+                  DenseTensor* last_h UNUSED,
                   DenseTensor* last_c,
                   DenseTensor* last_c_act,
                   DenseTensor* output,
-                  const DenseTensor* bias_hh,
-                  DenseTensor* weight_hh_gru) const override {
+                  const DenseTensor* bias_hh UNUSED,
+                  DenseTensor* weight_hh_gru UNUSED) const override {
     auto blas = phi::funcs::GetBlas<CPUContext, T>(*dev_ctx);
     auto mat_dim_a =
         phi::funcs::CreateMatrixDescriptor(init_h->dims(), 0, false);
......
@@ -20,10 +20,10 @@
 namespace phi {
 template <typename T, typename Context>
 void TrilIndicesKernel(const Context& dev_ctx,
-                       int rows,
+                       int rows UNUSED,
                        int cols,
                        int offset,
-                       DataType dtype,
+                       DataType dtype UNUSED,
                        DenseTensor* out) {
   T* out_data = dev_ctx.template Alloc<T>(out);
   const auto& out_dims = out->dims();
......
@@ -20,10 +20,10 @@
 namespace phi {
 template <typename T, typename Context>
 void TriuIndicesKernel(const Context& dev_ctx,
-                       int row,
+                       int row UNUSED,
                        int col,
                        int offset,
-                       DataType dtype,
+                       DataType dtype UNUSED,
                        DenseTensor* out) {
   T* out_data = dev_ctx.template Alloc<T>(out);
   const auto& out_dims = out->dims();
......
@@ -20,13 +20,13 @@ namespace phi {
 template <typename T, typename Context>
 void UniformInplaceGradKernel(const Context& ctx,
-                              const DenseTensor& out_grad,
-                              float min,
-                              float max,
-                              int seed,
-                              int diag_num,
-                              int diag_step,
-                              float diag_val,
+                              const DenseTensor& out_grad UNUSED,
+                              float min UNUSED,
+                              float max UNUSED,
+                              int seed UNUSED,
+                              int diag_num UNUSED,
+                              int diag_step UNUSED,
+                              float diag_val UNUSED,
                               DenseTensor* x_grad) {
   if (x_grad) {
     auto* data = ctx.template Alloc<T>(x_grad);
......
@@ -20,13 +20,13 @@ namespace phi {
 template <typename T, typename Context>
 void UniformInplaceKernel(const Context& ctx,
-                          const DenseTensor& x,
+                          const DenseTensor& x UNUSED,
                           float min,
                           float max,
                           int seed,
-                          int diag_num,
-                          int diag_step,
-                          float diag_val,
+                          int diag_num UNUSED,
+                          int diag_step UNUSED,
+                          float diag_val UNUSED,
                           DenseTensor* out) {
   T* data = ctx.template Alloc<T>(out);
   int64_t size = out->numel();
......
@@ -30,11 +30,11 @@ void UnpoolGradKernel(const Context& dev_ctx,
                       const DenseTensor& indices,
                       const DenseTensor& out,
                       const DenseTensor& out_grad,
-                      const std::vector<int>& ksize,
-                      const std::vector<int>& strides,
-                      const std::vector<int>& paddings,
-                      const IntArray& output_size,
-                      const std::string& data_format,
+                      const std::vector<int>& ksize UNUSED,
+                      const std::vector<int>& strides UNUSED,
+                      const std::vector<int>& paddings UNUSED,
+                      const IntArray& output_size UNUSED,
+                      const std::string& data_format UNUSED,
                       DenseTensor* x_grad) {
   T* input_grad_data = dev_ctx.template Alloc<T>(x_grad);
   const T* output_grad_data = out_grad.data<T>();
@@ -80,11 +80,11 @@ void Unpool3dGradKernel(const Context& dev_ctx,
                         const DenseTensor& indices,
                         const DenseTensor& out,
                         const DenseTensor& out_grad,
-                        const std::vector<int>& ksize,
+                        const std::vector<int>& ksize UNUSED,
                         const std::vector<int>& strides,
-                        const std::vector<int>& paddings,
-                        const std::vector<int>& output_size,
-                        const std::string& data_format,
+                        const std::vector<int>& paddings UNUSED,
+                        const std::vector<int>& output_size UNUSED,
+                        const std::string& data_format UNUSED,
                         DenseTensor* x_grad) {
   T* input_grad_data = dev_ctx.template Alloc<T>(x_grad);
   const T* output_grad_data = out_grad.data<T>();
......
@@ -27,11 +27,11 @@ template <typename T, typename Context>
 void UnpoolKernel(const Context& dev_ctx,
                   const DenseTensor& x,
                   const DenseTensor& indices,
-                  const std::vector<int>& ksize,
-                  const std::vector<int>& strides,
-                  const std::vector<int>& paddings,
-                  const IntArray& output_size,
-                  const std::string& data_format,
+                  const std::vector<int>& ksize UNUSED,
+                  const std::vector<int>& strides UNUSED,
+                  const std::vector<int>& paddings UNUSED,
+                  const IntArray& output_size UNUSED,
+                  const std::string& data_format UNUSED,
                   DenseTensor* out) {
   T* output_data = dev_ctx.template Alloc<T>(out);
   if (output_data) {
@@ -76,11 +76,11 @@ template <typename T, typename Context>
 void Unpool3dKernel(const Context& dev_ctx,
                     const DenseTensor& x,
                     const DenseTensor& indices,
-                    const std::vector<int>& ksize,
-                    const std::vector<int>& strides,
-                    const std::vector<int>& paddings,
-                    const std::vector<int>& output_size,
-                    const std::string& data_format,
+                    const std::vector<int>& ksize UNUSED,
+                    const std::vector<int>& strides UNUSED,
+                    const std::vector<int>& paddings UNUSED,
+                    const std::vector<int>& output_size UNUSED,
+                    const std::string& data_format UNUSED,
                     DenseTensor* out) {
   T* output_data = dev_ctx.template Alloc<T>(out);
   if (output_data) {
......
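Note that the unused parameters are annotated rather than removed: these kernels are registered against fixed signatures, and the do-nothing base implementations (GradCell, Cell) must match the virtual interface that the real overrides use. A hedged sketch of that pattern, with hypothetical names and the UNUSED macro sketched above:

// Hypothetical illustration of why the signature must be kept: the base
// class fixes the parameter list, so a do-nothing default annotates the
// arguments it ignores instead of dropping them.
struct CellBase {
  virtual ~CellBase() {}
  virtual void Run(const float* input UNUSED, float* output UNUSED) const {}
};

struct CopyCell : CellBase {
  void Run(const float* input, float* output) const override {
    output[0] = input[0];  // an override that actually uses both parameters
  }
};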