diff --git a/paddle/fluid/operators/match_matrix_tensor_op.cc b/paddle/fluid/operators/match_matrix_tensor_op.cc
index d30f396f9c1e8cf1c480c159ec9943b9542ca546..0c3d121605f03126846327d1c7b8acf75dee6822 100644
--- a/paddle/fluid/operators/match_matrix_tensor_op.cc
+++ b/paddle/fluid/operators/match_matrix_tensor_op.cc
@@ -56,8 +56,8 @@ void MatchMatrixTensorOP::InferShape(framework::InferShapeContext* ctx) const {
   PADDLE_ENFORCE_EQ(w_dims[2], y_dims[1],
                     "W 's shape must satisfy: W[2] = Y[1]");
 
-  int out_dim_0 = -1;
-  int tmp_dim_0 = -1;
+  int64_t out_dim_0 = -1;
+  int64_t tmp_dim_0 = -1;
   if (ctx->IsRuntime()) {
     framework::Variable* x_var =
         boost::get<framework::Variable*>(ctx->GetInputVarPtrs("X")[0]);
@@ -86,8 +86,8 @@ void MatchMatrixTensorOP::InferShape(framework::InferShapeContext* ctx) const {
     out_dim_0 = 0;
     for (size_t i = 1; i < x_lod_0.size(); i++) {
-      int x_len = x_lod_0[i] - x_lod_0[i - 1];
-      int y_len = y_lod_0[i] - y_lod_0[i - 1];
+      int64_t x_len = x_lod_0[i] - x_lod_0[i - 1];
+      int64_t y_len = y_lod_0[i] - y_lod_0[i - 1];
       out_dim_0 += (x_len * y_len);
     }
     out_dim_0 *= dim_t;
@@ -173,17 +173,17 @@ class CPUMatchMatrixTensorOPKernel : public framework::OpKernel<T> {
     auto* tmp = ctx.Output<LoDTensor>("Tmp");
     int dim_t = ctx.Attr<int>("dim_t");
-    int dim_in = x->dims()[1];
+    int64_t dim_in = x->dims()[1];
 
     const auto& offset_l = x->lod()[0];
     const auto& offset_r = y->lod()[0];
 
     std::vector<size_t> top_offset;
-    int top_size = 0;
+    size_t top_size = 0;
     top_offset.push_back(top_size);
     for (size_t b = 0; b < x->lod()[0].size() - 1; b++) {
-      int len_l = offset_l[b + 1] - offset_l[b];
-      int len_r = offset_r[b + 1] - offset_r[b];
+      size_t len_l = offset_l[b + 1] - offset_l[b];
+      size_t len_r = offset_r[b + 1] - offset_r[b];
       top_size += dim_t * len_l * len_r;
       top_offset.push_back(top_size);
     }
@@ -204,8 +204,8 @@ class CPUMatchMatrixTensorOPKernel : public framework::OpKernel<T> {
     for (size_t b = 0; b < x->lod()[0].size() - 1; b++) {
       for (int t = 0; t < dim_t; t++) {
-        int len_l = offset_l[b + 1] - offset_l[b];
-        int len_r = offset_r[b + 1] - offset_r[b];
+        size_t len_l = offset_l[b + 1] - offset_l[b];
+        size_t len_r = offset_r[b + 1] - offset_r[b];
         auto* top_data = out_data + top_offset[b] + t * len_l * len_r;
         const auto* l_t_data =
             bottom_l_trans_data + offset_l[b] * dim_t * dim_in + t * dim_in;
@@ -234,16 +234,16 @@ class CPUMatchMatrixTensorOPGradKernel : public framework::OpKernel<T> {
     auto* tmp = ctx.Input<LoDTensor>("Tmp");
     int dim_t = ctx.Attr<int>("dim_t");
-    int dim_in = x->dims()[1];
+    int64_t dim_in = x->dims()[1];
 
     const auto& offset_l = x->lod()[0];
     const auto& offset_r = y->lod()[0];
 
-    std::vector<int> top_offset;
-    int top_size = 0;
+    std::vector<size_t> top_offset;
+    size_t top_size = 0;
     top_offset.push_back(top_size);
     for (size_t b = 0; b < x->lod()[0].size() - 1; b++) {
-      int len_l = offset_l[b + 1] - offset_l[b];
-      int len_r = offset_r[b + 1] - offset_r[b];
+      size_t len_l = offset_l[b + 1] - offset_l[b];
+      size_t len_r = offset_r[b + 1] - offset_r[b];
       top_size += dim_t * len_l * len_r;
       top_offset.push_back(top_size);
     }
@@ -270,11 +270,11 @@ class CPUMatchMatrixTensorOPGradKernel : public framework::OpKernel<T> {
     for (size_t b = 0; b < x->lod()[0].size() - 1; b++) {
       for (int t = 0; t < dim_t; t++) {
-        int len_l = offset_l[b + 1] - offset_l[b];
-        int len_r = offset_r[b + 1] - offset_r[b];
+        size_t len_l = offset_l[b + 1] - offset_l[b];
+        size_t len_r = offset_r[b + 1] - offset_r[b];
 
-        for (int i = 0; i < len_l; i++) {
-          for (int j = 0; j < len_r; j++) {
+        for (size_t i = 0; i < len_l; i++) {
+          for (size_t j = 0; j < len_r; j++) {
             auto diff =
                 top_diff[top_offset[b] + t * len_l * len_r + i * len_r + j];
             auto* l_trans_data = bottom_l_trans_data +
@@ -324,11 +324,7 @@
 REGISTER_OPERATOR(match_matrix_tensor_grad, ops::MatchMatrixTensorOpGrad);
 REGISTER_OP_CPU_KERNEL(match_matrix_tensor,
                        ops::CPUMatchMatrixTensorOPKernel<
                            paddle::platform::CPUDeviceContext, float>);
-// ops::CPUMatchMatrixTensorOPKernel<
-//     paddle::platform::CPUDeviceContext, double>
 REGISTER_OP_CPU_KERNEL(match_matrix_tensor_grad,
                        ops::CPUMatchMatrixTensorOPGradKernel<
                            paddle::platform::CPUDeviceContext, float>);
-// ops::CPUMatchMatrixTensorOPGradKernel<
-//     paddle::platform::CPUDeviceContext, double>
diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index 6ea4d0ddbbec55bd60318f3511fbabd8e12b1862..fe8c2f04aa90fa0c81118023b5716c2d45a2aacd 100644
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -2060,7 +2060,8 @@ class DecayedAdagradOptimizer(Optimizer):
             },
             outputs={"ParamOut": param_and_grad[0],
                      "MomentOut": moment_acc},
-            attrs={"epsilon": self._epsilon},
+            attrs={"epsilon": self._epsilon,
+                   "decay": self._decay},
             stop_gradient=True)
 
         return decayed_adagrad_op