diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc
index 27e778494003044fbe622d88c043a0a5e270281b..c57537be4bf67a8db6a49669ab8d2ed1b1324bdc 100644
--- a/paddle/framework/operator.cc
+++ b/paddle/framework/operator.cc
@@ -209,8 +209,7 @@ const std::vector<const Tensor*> InferShapeContext::MultiInput<Tensor>(
 
 template <>
 Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
   auto* var = OutputVar(name);
-  if (var == nullptr) return nullptr;
-  return GetTensorFromVar(var);
+  return var == nullptr ? nullptr : const_cast<Tensor*>(GetTensorFromVar(var));
 }
 
 template <>
@@ -222,7 +221,9 @@ std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
   std::transform(names.begin(), names.end(), std::back_inserter(res),
                  [&](const std::string& sub_name) {
                    auto var = scope().FindVar(sub_name);
-                   return var == nullptr ? nullptr : GetTensorFromVar(var);
+                   return var == nullptr
+                              ? nullptr
+                              : const_cast<Tensor*>(GetTensorFromVar(var));
                  });
   return res;
 }
diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h
index bbf9930f0af06f2345843298d73fea281ff69ba9..adae7bfc3d7d31b1ed0373f01db4ef80343a08f7 100644
--- a/paddle/framework/operator.h
+++ b/paddle/framework/operator.h
@@ -327,13 +327,13 @@ class InferShapeContext {
     return res;
   }
 
-  Tensor* GetTensorFromVar(const Variable* var) const {
+  const Tensor* GetTensorFromVar(const Variable* var) const {
     if (var->IsType<LoDTensor>()) {
-      return const_cast<LoDTensor*>(&var->Get<LoDTensor>());
+      return &var->Get<LoDTensor>();
     }
     PADDLE_ENFORCE(var->IsType<Tensor>(),
                    "The Input(%s) must be LoDTensor or Tensor.");
-    return const_cast<Tensor*>(&var->Get<Tensor>());
+    return &var->Get<Tensor>();
   }
 
  private:
@@ -341,6 +341,13 @@ class InferShapeContext {
   const Scope& scope_;
 };
 
+template <>
+const Tensor* InferShapeContext::Input<Tensor>(const std::string& name) const;
+
+template <>
+const std::vector<const Tensor*> InferShapeContext::MultiInput<Tensor>(
+    const std::string& name) const;
+
 template <typename T>
 struct EigenDeviceConverter;
 
@@ -397,6 +404,13 @@ class ExecutionContext : public InferShapeContext {
   const platform::DeviceContext* device_context_;
 };
 
+template <>
+Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const;
+
+template <>
+std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
+    const std::string& name) const;
+
 class OpKernel {
  public:
   /**
diff --git a/paddle/operators/sequence_avg_pool_op.cc b/paddle/operators/sequence_avg_pool_op.cc
index 59a361761a18f5394c2ac53147fa3630c726eb61..c15a5833deba2e198f6cb724bda7e3306c56e461 100644
--- a/paddle/operators/sequence_avg_pool_op.cc
+++ b/paddle/operators/sequence_avg_pool_op.cc
@@ -66,7 +66,7 @@ class SequenceAvgPoolGradOp : public framework::OperatorWithKernel {
     auto x_dims = ctx.Input<LoDTensor>("X")->dims();
     PADDLE_ENFORCE_EQ(og_dims.size(), x_dims.size(),
                       "The rank of output grad must equal to Input(X).");
-    for (size_t i = 1; i < og_dims.size(); ++i) {
+    for (int64_t i = 1; i < og_dims.size(); ++i) {
       PADDLE_ENFORCE_EQ(og_dims[i], x_dims[i], "The dimension mismatch.");
     }
     auto* x_grad =
diff --git a/paddle/operators/sequence_avg_pool_op.h b/paddle/operators/sequence_avg_pool_op.h
index ba68b5e4b921cbf734526af4bd9f4e87642f9210..6e343b87e2938399409498407ac46b2416dc2231 100644
--- a/paddle/operators/sequence_avg_pool_op.h
+++ b/paddle/operators/sequence_avg_pool_op.h
@@ -38,7 +38,7 @@ class SequenceAvgPoolKernel : public framework::OpKernel {
     out->mutable_data<T>(context.GetPlace());
     auto place = context.GetEigenDevice<Place>();
 
-    for (int i = 0; i < lod[0].size() - 1; ++i) {
+    for (int i = 0; i < static_cast<int>(lod[0].size()) - 1; ++i) {
       Tensor in_t = in->Slice<T>(static_cast<int>(lod[0][i]),
                                  static_cast<int>(lod[0][i + 1]));
       Tensor out_t = out->Slice<T>(i, i + 1);
@@ -64,7 +64,7 @@ class SequenceAvgPoolGradKernel : public framework::OpKernel {
     in_g->mutable_data<T>(context.GetPlace());
     auto place = context.GetEigenDevice<Place>();
 
-    for (int i = 0; i < lod[0].size() - 1; ++i) {
+    for (int i = 0; i < static_cast<int>(lod[0].size()) - 1; ++i) {
      auto in_g_t = in_g->Slice<T>(static_cast<int>(lod[0][i]),
                                   static_cast<int>(lod[0][i + 1]));
      auto out_g_t = out_g->Slice<T>(i, i + 1);
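
The const-correctness pattern the patch applies (a shared const accessor plus a const_cast only on the output path) can be seen in isolation in the sketch below. It uses made-up stand-in types (Tensor, Variable, Context, Find), not PaddlePaddle's real classes; it only illustrates the idea behind the changes to GetTensorFromVar, Input<Tensor> and Output<Tensor>.

// Minimal sketch with hypothetical types: the shared helper returns a const
// pointer, and only the output accessor casts the constness away.
#include <string>
#include <unordered_map>

struct Tensor {};    // stand-in for framework::Tensor
struct Variable {    // stand-in for a Variable holding a Tensor
  Tensor tensor;
};

class Context {
 public:
  // Read-only helper, usable by both input and output accessors.
  const Tensor* GetTensorFromVar(const Variable* var) const {
    return &var->tensor;
  }

  // Inputs stay const.
  const Tensor* Input(const std::string& name) const {
    const Variable* var = Find(name);
    return var == nullptr ? nullptr : GetTensorFromVar(var);
  }

  // Outputs must be writable; constness is removed only here.
  Tensor* Output(const std::string& name) const {
    const Variable* var = Find(name);
    return var == nullptr ? nullptr
                          : const_cast<Tensor*>(GetTensorFromVar(var));
  }

 private:
  const Variable* Find(const std::string& name) const {
    auto it = vars_.find(name);
    return it == vars_.end() ? nullptr : &it->second;
  }

  std::unordered_map<std::string, Variable> vars_;
};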