diff --git a/paddle/fluid/framework/attribute.h b/paddle/fluid/framework/attribute.h index 86dd7a68b532c1f0f0f3c5bdde3dcbc9a21f4cb0..e23abe62acd2ea1efc3298c8e9ab0d34cf4bc216 100644 --- a/paddle/fluid/framework/attribute.h +++ b/paddle/fluid/framework/attribute.h @@ -220,7 +220,7 @@ class DefaultValueSetter { public: explicit DefaultValueSetter(T default_value) : default_value_(default_value) {} - void operator()(T* value) const { *value = default_value_; } + const T& operator()() const { return default_value_; } private: T default_value_; @@ -259,7 +259,7 @@ class EnumInContainer { // an attribute can have more than one limits template class TypedAttrChecker { - typedef std::function DefaultValueChecker; + typedef std::function DefaultValueChecker; typedef std::function ValueChecker; public: @@ -297,18 +297,17 @@ class TypedAttrChecker { } void operator()(AttributeMap* attr_map) const { - if (!attr_map->count(attr_name_)) { + auto it = attr_map->find(attr_name_); + if (it == attr_map->end()) { // user do not set this attr PADDLE_ENFORCE(!default_value_setter_.empty(), "Attribute '%s' is required!", attr_name_); // default_value_setter_ has no more than one element - T val; - (default_value_setter_[0])(&val); - (*attr_map)[attr_name_] = val; + attr_map->emplace(attr_name_, default_value_setter_[0]()); } - Attribute& attr = attr_map->at(attr_name_); + it = attr_map->find(attr_name_); ExtractAttribute extract_attr(attr_name_); - T* attr_value = extract_attr(attr); + T* attr_value = extract_attr(it->second); for (const auto& checker : value_checkers_) { checker(*attr_value); } diff --git a/paddle/fluid/framework/data_device_transform_test.cu b/paddle/fluid/framework/data_device_transform_test.cu index 96a2f9250ff928fe58a5339a25c68c9db515522d..9681b33c0aff676d20b1c3acc4659f2379f207eb 100644 --- a/paddle/fluid/framework/data_device_transform_test.cu +++ b/paddle/fluid/framework/data_device_transform_test.cu @@ -64,7 +64,7 @@ template class TestKernel : public OpKernel { public: void Compute(const ExecutionContext& ctx) const { - std::cout << ctx.op().DebugString() << std::endl; + std::cout << ctx.DebugString() << std::endl; const Tensor* input = ctx.Input("input"); diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc index 4da50d6578f948373ef4da5dcbbe1b3b64347c7c..c657d7a2bccde1d02b9b7e1f8df451df5d99b534 100644 --- a/paddle/fluid/framework/op_desc.cc +++ b/paddle/fluid/framework/op_desc.cc @@ -47,18 +47,16 @@ class CompileTimeInferShapeContext : public InferShapeContext { AttrReader Attrs() const override; - const std::vector &Inputs( - const std::string &name) const override; + std::vector Inputs(const std::string &name) const override; - const std::vector &Outputs( - const std::string &name) const override; + std::vector Outputs(const std::string &name) const override; void ShareDim(const std::string &in, const std::string &out, size_t i = 0, size_t j = 0) override { PADDLE_ENFORCE_LT(i, Inputs(in).size()); PADDLE_ENFORCE_LT(j, Outputs(out).size()); - const std::string &input_n = Inputs(in)[i]; - const std::string &output_n = Outputs(out)[j]; + std::string input_n = Inputs(in)[i]; + std::string output_n = Outputs(out)[j]; PADDLE_ENFORCE(input_n != framework::kEmptyVarName, "The %s[%d] is @EMPTY@", in, i); @@ -74,6 +72,33 @@ class CompileTimeInferShapeContext : public InferShapeContext { SetDim(output_n, GetDim(input_n)); } + void ShareAllLoD(const std::string &in, + const std::string &out) const override { + auto &in_var_names = op_.Input(in); + auto 
&out_var_names = op_.Output(out); + + PADDLE_ENFORCE_EQ( + in_var_names.size(), out_var_names.size(), + platform::errors::PreconditionNotMet( + "Op [%s]: Input var number shoule be equal with output var number", + op_.Type())); + + for (size_t i = 0; i < in_var_names.size(); ++i) { + if (out_var_names[i] == framework::kEmptyVarName) { + continue; + } + + auto *in_var = block_.FindVarRecursive(in_var_names[i]); + auto *out_var = block_.FindVarRecursive(out_var_names[i]); + if (in_var->GetType() != proto::VarType::LOD_TENSOR && + in_var->GetType() != proto::VarType::LOD_TENSOR_ARRAY) { + VLOG(3) << "input " << in << " is not LoDTensor or LoDTensorArray."; + return; + } + out_var->SetLoDLevel(in_var->GetLoDLevel()); + } + } + void ShareLoD(const std::string &in, const std::string &out, size_t i = 0, size_t j = 0) const override { PADDLE_ENFORCE_LT(i, Inputs(in).size()); @@ -173,7 +198,7 @@ class CompileTimeInferShapeContext : public InferShapeContext { } void SetOutputDim(const std::string &name, const DDim &dim) override { - auto &arg_names = Outputs(name); + auto arg_names = Outputs(name); PADDLE_ENFORCE_EQ(arg_names.size(), 1UL, "Output(%s) should hold one element, but now it holds %d", name, arg_names.size()); @@ -182,7 +207,7 @@ class CompileTimeInferShapeContext : public InferShapeContext { void SetOutputsDim(const std::string &name, const std::vector &dims) override { - auto &names = Outputs(name); + auto names = Outputs(name); SetDims(names, dims); } @@ -789,12 +814,12 @@ AttrReader CompileTimeInferShapeContext::Attrs() const { return AttrReader(op_.GetAttrMap()); } -const std::vector &CompileTimeInferShapeContext::Inputs( +std::vector CompileTimeInferShapeContext::Inputs( const std::string &name) const { return op_.Input(name); } -const std::vector &CompileTimeInferShapeContext::Outputs( +std::vector CompileTimeInferShapeContext::Outputs( const std::string &name) const { return op_.Output(name); } diff --git a/paddle/fluid/framework/op_registry.cc b/paddle/fluid/framework/op_registry.cc index 346d14d408ea1ed2cfbdbed5f48e56902e6e95b2..81cfaf92e391de28356dec05fd7b94cce3e51440 100644 --- a/paddle/fluid/framework/op_registry.cc +++ b/paddle/fluid/framework/op_registry.cc @@ -21,9 +21,9 @@ namespace framework { std::unique_ptr OpRegistry::CreateOp( const std::string& type, const VariableNameMap& inputs, - const VariableNameMap& outputs, AttributeMap attrs) { + const VariableNameMap& outputs, AttributeMap attrs, bool attr_check) { auto& info = OpInfoMap::Instance().Get(type); - if (info.Checker() != nullptr) { + if (attr_check && info.Checker() != nullptr) { info.Checker()->Check(&attrs); } auto op = info.Creator()(type, inputs, outputs, attrs); diff --git a/paddle/fluid/framework/op_registry.h b/paddle/fluid/framework/op_registry.h index 221ab352ad3ce3043c3c9219b33e7ca7ade1b23f..0f842637a58e0897e8b68fe06d1e712ffd20ad97 100644 --- a/paddle/fluid/framework/op_registry.h +++ b/paddle/fluid/framework/op_registry.h @@ -67,10 +67,34 @@ struct OperatorRegistrar : public Registrar { class OpRegistry { public: + /** + * @brief Return an OperatorBase constructed by type, inputs, outputs, attrs. + * In dygraph mode, inputs, output, attrs will be set to empty map to + * improve the execution efficiency of dygraph. + * Dygraph mode will use: + * framework::OpRegistry::CreateOp(type, {}, {}, {}, false). + * + * @param[str] type The operator type. + * @param[map] inputs Inputs map of the operator. + * @param[map] outputs Outputs map of the operator. 
+ * @param[unordered_map] attrs Attributes map of the operator. + * @param[bool] attr_check + * Whether do the attribute check before OperatorBase construction. + * Default is true. + * Attr_check is used to control the check of attribute map. + * The check of attribute map have two purposes: + * 1. check whether the attribute item is valid or not. + * 2. add attribute item which has default value + * if it is not in attrs. + * In dygraph mode, attrs is an empty unordered_map, + * attr_check is set to false, otherwise it will be failed + * when check function called. + */ static std::unique_ptr CreateOp(const std::string& type, const VariableNameMap& inputs, const VariableNameMap& outputs, - AttributeMap attrs); + AttributeMap attrs, + bool attr_check = true); static std::unique_ptr CreateOp(const proto::OpDesc& op_desc); diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index 22a62d806fa60aaffdf9b5a541552edfc13a02d8..33d9b6cb81f8124a696b3d55fbfdc4cd5f66ac97 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -321,8 +321,14 @@ OperatorBase::OperatorBase(const std::string& type, attrs_(attrs), // NOTE(zjl): why op_info may be nullptr? info_(OpInfoMap::Instance().GetNullable(type)) { - GenerateTemporaryNames(); - CheckAllInputOutputSet(); + // In dygraph mode, all the OperatorBase will be constructed by function: + // framework::OpRegistry::CreateOp(type, {}, {}, {}, false). + // Inputs, outputs and attrs will be set to empty map + // to improve the execution efficiency of dygraph. + if (inputs_.size() > 0 || outputs_.size() > 0) { + GenerateTemporaryNames(); + CheckAllInputOutputSet(); + } } std::vector OperatorBase::InputVars() const { @@ -457,15 +463,14 @@ const Tensor* ExecutionContext::Input(const std::string& name) const { template <> const std::vector ExecutionContext::MultiInput( const std::string& name) const { - auto it = ctx_.inputs.find(name); - if (it == ctx_.inputs.end()) { + auto vars = MultiInputVar(name); + if (vars.size() == 0) { return {}; } - const std::vector& vars = it->second; std::vector res; res.reserve(vars.size()); std::transform(vars.begin(), vars.end(), std::back_inserter(res), - [&](Variable* var) -> const Tensor* { + [&](const Variable* var) -> const Tensor* { if (var == nullptr) return nullptr; PADDLE_ENFORCE( var->IsType(), @@ -484,11 +489,11 @@ Tensor* ExecutionContext::Output(const std::string& name) const { template <> std::vector ExecutionContext::MultiOutput( const std::string& name) const { - auto it = ctx_.outputs.find(name); - if (it == ctx_.outputs.end()) { + auto vars = MultiOutputVar(name); + + if (vars.size() == 0) { return {}; } - const std::vector& vars = it->second; std::vector res; res.reserve(vars.size()); std::transform(vars.begin(), vars.end(), std::back_inserter(res), @@ -580,13 +585,11 @@ class RuntimeInferShapeContext : public InferShapeContext { AttrReader Attrs() const override { return AttrReader(op_.Attrs()); } - const std::vector& Inputs( - const std::string& name) const override { + std::vector Inputs(const std::string& name) const override { return op_.Inputs(name); } - const std::vector& Outputs( - const std::string& name) const override { + std::vector Outputs(const std::string& name) const override { return op_.Outputs(name); } @@ -622,6 +625,51 @@ class RuntimeInferShapeContext : public InferShapeContext { } } + void ShareAllLoD(const std::string& in, + const std::string& out) const override { + auto in_it = ctx_.inputs.find(in); + auto out_it = 
ctx_.outputs.find(out); + PADDLE_ENFORCE_NE(in_it, ctx_.inputs.end(), + platform::errors::NotFound( + "Input [%s] found error in Op [%s]", in, op_.Type())); + PADDLE_ENFORCE_NE( + out_it, ctx_.outputs.end(), + platform::errors::NotFound("Output [%s] found error in Op [%s]", out, + op_.Type())); + + auto& in_var_list = in_it->second; + auto& out_var_list = out_it->second; + + PADDLE_ENFORCE_EQ( + in_var_list.size(), out_var_list.size(), + platform::errors::PreconditionNotMet( + "Op [%s]: Input var size should be equal with ouput var size", + op_.Type())); + + auto& out_var_names = op_.Outputs(out); + + for (size_t i = 0; i < in_var_list.size(); ++i) { + if (out_var_names[i] == framework::kEmptyVarName) { + continue; + } + + Variable* in_var = in_var_list[i]; + if (!in_var->IsType()) return; + Variable* out_var = out_var_list[i]; + PADDLE_ENFORCE_EQ(out_var->IsType(), true, + platform::errors::PreconditionNotMet( + "The %d-th output of Output(%s) must be LoDTensor.", + i, out_var_names[i])); + auto& in_tensor = in_var->Get(); + auto* out_tensor = out_var->GetMutable(); + out_tensor->set_lod(in_tensor.lod()); +#ifdef PADDLE_WITH_MKLDNN + if (in_tensor.layout() != DataLayout::kMKLDNN) +#endif + out_tensor->set_layout(in_tensor.layout()); + } + } + void ShareLoD(const std::string& in, const std::string& out, size_t i = 0, size_t j = 0) const override { auto in_it = ctx_.inputs.find(in); @@ -1138,7 +1186,7 @@ void OperatorWithKernel::ParseInputDataType( proto::VarType::Type* data_type) const { proto::VarType::Type dafault_data_type = static_cast(-1); - const std::vector vars = ctx.MultiInputVar(name); + const std::vector vars = ctx.MultiInputVar(name); for (size_t i = 0; i < vars.size(); ++i) { const Variable* var = vars[i]; if (var != nullptr) { @@ -1156,7 +1204,7 @@ void OperatorWithKernel::ParseInputDataType( platform::errors::InvalidArgument( "The Tensor in the %s Op's Input Variable %s(%s) is " "not initialized.", - Type(), name, ctx.Inputs(name).at(i))); + Type(), name, ctx.InputNames(name).at(i))); proto::VarType::Type tmp = t->type(); PADDLE_ENFORCE( tmp == *data_type || *data_type == dafault_data_type, @@ -1177,8 +1225,8 @@ proto::VarType::Type OperatorWithKernel::IndicateDataType( proto::VarType::Type dafault_data_type = static_cast(-1); proto::VarType::Type data_type = dafault_data_type; - for (auto& input : ctx.Context().inputs) { - ParseInputDataType(ctx, input.first, &data_type); + for (auto& input : ctx.InNameList()) { + ParseInputDataType(ctx, input, &data_type); } PADDLE_ENFORCE_NE(data_type, dafault_data_type, "DataType should be indicated by input Variable."); diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index 6a9af6af31588ead7ce47f2917d04dceba435d6c..7380906588eb44c4fff558e511082ea97f1bb64a 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -238,35 +238,57 @@ class ExecutionContext { device_context_(device_context), ctx_(ctx), kernel_configs_(configs) {} + virtual ~ExecutionContext() {} - const OperatorBase& op() const { return op_; } + virtual std::string InputName(const std::string& name) const { + return op_.Input(name); + } + virtual std::vector InputNames(const std::string& name) const { + return op_.Inputs(name); + } + virtual std::string OutputName(const std::string& name) const { + return op_.Output(name); + } + + virtual std::vector OutputNames(const std::string& name) const { + return op_.Outputs(name); + } + + virtual bool HasAttr(const std::string& name) const { + return 
op_.HasAttr(name); + } + virtual const AttributeMap& Attrs() const { return op_.Attrs(); } + + const std::string& Type() const { return op_.Type(); } const Scope& scope() const { return scope_; } template inline const T& Attr(const std::string& name) const { - return op_.Attr(name); + return boost::get(GetAttr(name)); } - bool HasAttr(const std::string& name) const { return op_.HasAttr(name); } + virtual const Attribute& GetAttr(const std::string& name) const { + return op_.Attrs().at(name); + } - bool HasInput(const std::string& name) const; + virtual bool HasInput(const std::string& name) const; - bool HasOutput(const std::string& name) const; + virtual bool HasOutput(const std::string& name) const; - size_t InputSize(const std::string& name) const { + virtual size_t InputSize(const std::string& name) const { return op_.Inputs(name).size(); } - size_t OutputSize(const std::string& name) const { + virtual size_t OutputSize(const std::string& name) const { return op_.Outputs(name).size(); } - const Variable* InputVar(const std::string& name) const; + virtual const Variable* InputVar(const std::string& name) const; - Variable* OutputVar(const std::string& name) const; + virtual Variable* OutputVar(const std::string& name) const; - const std::vector MultiInputVar( + virtual const std::vector MultiInputVar( const std::string& name) const { auto it = ctx_.inputs.find(name); if (it == ctx_.inputs.end()) { @@ -275,8 +297,7 @@ class ExecutionContext { return {it->second.begin(), it->second.end()}; } - std::vector MultiOutputVar(const std::string& name) const { - auto names = op_.Outputs(name); + virtual std::vector MultiOutputVar(const std::string& name) const { auto it = ctx_.outputs.find(name); if (it == ctx_.outputs.end()) { return {}; @@ -284,6 +305,17 @@ class ExecutionContext { return it->second; } + virtual std::vector InNameList() const { + std::vector vec_temp; + vec_temp.reserve(ctx_.inputs.size()); + + for (auto& input : ctx_.inputs) { + vec_temp.push_back(input.first); + } + + return vec_temp; + } + template const T* Input(const std::string& name) const { auto* var = InputVar(name); @@ -298,15 +330,14 @@ class ExecutionContext { template const std::vector MultiInput(const std::string& name) const { - auto it = ctx_.inputs.find(name); - if (it == ctx_.inputs.end()) { + auto vars = MultiInputVar(name); + if (vars.size() == 0) { return {}; } - const std::vector& vars = it->second; std::vector res; res.reserve(vars.size()); std::transform(vars.begin(), vars.end(), std::back_inserter(res), - [&](Variable* var) -> const T* { + [&](const Variable* var) -> const T* { return var == nullptr ? nullptr : &var->Get(); }); return res; @@ -314,17 +345,19 @@ class ExecutionContext { template std::vector MultiOutput(const std::string& name) const { - auto it = ctx_.outputs.find(name); - if (it == ctx_.outputs.end()) { + auto vars = MultiOutputVar(name); + + if (vars.size() == 0) { return {}; } - const std::vector& vars = it->second; + std::vector res; res.reserve(vars.size()); std::transform(vars.begin(), vars.end(), std::back_inserter(res), [&](Variable* var) -> T* { return var == nullptr ? nullptr : var->GetMutable(); }); + return res; } @@ -347,16 +380,6 @@ class ExecutionContext { } #endif - //! Get actual name vector for this input. - const std::vector& Inputs(const std::string& name) const { - return op_.Inputs(name); - } - - //! Get actual name vector for this output. 
- const std::vector& Outputs(const std::string& name) const { - return op_.Outputs(name); - } - template Tensor AllocateTmpTensor(const framework::DDim& dim, const DevContext& dev_ctx) const { @@ -385,7 +408,9 @@ class ExecutionContext { return *boost::get>((*kernel_configs_)[idx]); } - const RuntimeContext& Context() const { return ctx_; } + const RuntimeContext Context() const { return ctx_; } + + std::string DebugString() const { return op_.DebugString(); } private: const OperatorBase& op_; diff --git a/paddle/fluid/framework/operator_test.cc b/paddle/fluid/framework/operator_test.cc index 77db37197095d780c47f5f76f9893cbe94c14e41..e9d6a9e8a02a874f84f71a2c64823bae4f25d88d 100644 --- a/paddle/fluid/framework/operator_test.cc +++ b/paddle/fluid/framework/operator_test.cc @@ -135,10 +135,10 @@ template class CPUKernelTest : public OpKernel { public: void Compute(const ExecutionContext& ctx) const { - std::cout << ctx.op().DebugString() << std::endl; + std::cout << ctx.DebugString() << std::endl; cpu_kernel_run_num++; - ASSERT_EQ(ctx.op().Input("x"), "IN1"); - ASSERT_EQ(ctx.op().Output("y"), "OUT1"); + ASSERT_EQ(ctx.InputName("x"), "IN1"); + ASSERT_EQ(ctx.OutputName("y"), "OUT1"); } }; @@ -146,10 +146,10 @@ template class CPUKernel2Test : public OpKernel { public: void Compute(const ExecutionContext& ctx) const { - std::cout << ctx.op().DebugString() << std::endl; + std::cout << ctx.DebugString() << std::endl; cpu_kernel2_run_num++; - ASSERT_EQ(ctx.op().Input("x"), "IN1"); - ASSERT_EQ(ctx.op().Output("y"), "OUT1"); + ASSERT_EQ(ctx.InputName("x"), "IN1"); + ASSERT_EQ(ctx.OutputName("y"), "OUT1"); } }; @@ -172,7 +172,7 @@ class OpKernelTestMultiInputsProtoAndCheckerMaker class CPUKernalMultiInputsTest : public OpKernel { public: void Compute(const ExecutionContext& ctx) const { - auto xs = ctx.op().Inputs("xs"); + auto xs = ctx.InputNames("xs"); ASSERT_EQ(xs.size(), 3UL); ASSERT_EQ(xs[0], "x0"); ASSERT_EQ(xs[1], "x1"); @@ -196,10 +196,10 @@ class CPUKernalMultiInputsTest : public OpKernel { auto outTensor0 = ctx.MultiOutput("ys"); ASSERT_EQ(outTensor0.size(), 2U); - auto k = ctx.op().Input("k"); + auto k = ctx.InputName("k"); ASSERT_EQ(k, "k0"); - auto ys = ctx.op().Outputs("ys"); + auto ys = ctx.OutputNames("ys"); ASSERT_EQ(ys.size(), 2UL); ASSERT_EQ(ys[0], "y0"); ASSERT_EQ(ys[1], "y1"); @@ -496,6 +496,41 @@ TEST(IndicateVarDataTypeTest, other) { ASSERT_TRUE(caught); } +TEST(ExecutionContextAttrAndInOut, new_api) { + paddle::framework::InitDevices(true); + paddle::framework::proto::OpDesc op_desc; + op_desc.set_type("test_operator"); + BuildVar("input", {"IN1"}, op_desc.add_inputs()); + BuildVar("output", {"OUT1"}, op_desc.add_outputs()); + + auto attr = op_desc.mutable_attrs()->Add(); + attr->set_name("scale"); + attr->set_type(paddle::framework::proto::AttrType::FLOAT); + attr->set_f(3.14); + + paddle::platform::CPUPlace cpu_place; + paddle::framework::Scope scope; + + auto op = paddle::framework::OpRegistry::CreateOp(op_desc); + auto* var = scope.Var("OUT1"); + var->GetMutable(); + + paddle::platform::DeviceContextPool& pool = + paddle::platform::DeviceContextPool::Instance(); + auto* dev_ctx = pool.Get(cpu_place); + + paddle::framework::RuntimeContext ctx({}, {}); + paddle::framework::ExecutionContext exe_context(*(op.get()), scope, *dev_ctx, + ctx, nullptr); + + ASSERT_EQ(exe_context.InputSize("input"), 1u); + ASSERT_EQ(exe_context.OutputSize("output"), 1u); + + auto attr_map = exe_context.Attrs(); + ASSERT_EQ(boost::get(attr_map["scale"]), 3.14f); + ASSERT_EQ(exe_context.Type(), 
"test_operator"); +} + namespace paddle { namespace framework { diff --git a/paddle/fluid/framework/shape_inference.h b/paddle/fluid/framework/shape_inference.h index 73dd621a27ebda27002499b7d04ffe845d509435..7ce8deb7cfc70d39de52e1fd9e5bace969f854e7 100644 --- a/paddle/fluid/framework/shape_inference.h +++ b/paddle/fluid/framework/shape_inference.h @@ -54,16 +54,18 @@ class InferShapeContext { const std::vector &dims); virtual AttrReader Attrs() const = 0; - virtual const std::vector &Inputs( - const std::string &name) const = 0; - virtual const std::vector &Outputs( - const std::string &name) const = 0; + virtual std::vector Inputs(const std::string &name) const = 0; + virtual std::vector Outputs(const std::string &name) const = 0; virtual void ShareDim(const std::string &in, const std::string &out, size_t i = 0, size_t j = 0) = 0; virtual void ShareLoD(const std::string &in, const std::string &out, size_t i = 0, size_t j = 0) const = 0; + // share the lod information of all the tensor from in to out. + // out_vars[i].lod = in_vars[i].lod + virtual void ShareAllLoD(const std::string &in, + const std::string &out) const = 0; virtual int32_t GetLoDLevel(const std::string &in, size_t i = 0) const = 0; diff --git a/paddle/fluid/imperative/engine.cc b/paddle/fluid/imperative/engine.cc index 0d9edce4db879cbfa0d674f964f233145501f518..c089997059441f93e8fcbc84d46825316df18e0a 100644 --- a/paddle/fluid/imperative/engine.cc +++ b/paddle/fluid/imperative/engine.cc @@ -152,8 +152,6 @@ void BasicEngine::PrepareDeps() { q.pop(); VLOG(3) << "Checking grads of op " << cur_op->Type(); - CheckBackwardInputs(cur_op); - SetBackwardOutputs(cur_op); PrepareGradAccumulators(cur_op); @@ -189,6 +187,9 @@ void BasicEngine::Execute() { OpBase* cur_op = q.front(); q.pop(); + // CheckBackWardInput + CheckBackwardInputs(cur_op); + // Step 1: Run Backward auto& bwd_ins = cur_op->GetInsMap(); auto& bwd_outs = cur_op->GetOutsMap(); @@ -210,7 +211,6 @@ void BasicEngine::Execute() { } } } - VLOG(3) << "Start to execute grad op " << cur_op->Type(); RunOp(cur_op, bwd_ins, tmp_outs, cur_op->place()); // Step 2: Sum Gradient diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc index 573f75e1de8cc45efdc7a0f701c0e603647144fe..8a25b4220c4039791f43ebcdf1e53ba5b7851432 100644 --- a/paddle/fluid/imperative/layer.cc +++ b/paddle/fluid/imperative/layer.cc @@ -190,6 +190,7 @@ void VarBase::AddGradOps(const std::weak_ptr& op) { void VarBase::ClearGradient() { if (grad_var_) { auto* grad_t = grad_var_->var_.GetMutable(); + if (grad_t->IsInitialized()) { auto* dev_ctx = platform::DeviceContextPool::Instance().Get(grad_t->place()); @@ -241,18 +242,9 @@ OpBase::OpBase(size_t id, const std::string& type, const NameVarBaseMap& ins, info.Checker()->Check(&attrs_); } - auto input_name_map = CreateVarNameMap(info, type, ins, true); - auto output_name_map = CreateVarNameMap(info, type, outs, false); - op_ = framework::OpRegistry::CreateOp(type, std::move(input_name_map), - std::move(output_name_map), attrs); - VLOG(3) << "Construct Op: " << type << std::endl; -} + op_ = framework::OpRegistry::CreateOp(type, {}, {}, {}, false); -// create OpBase from opdesc -OpBase::OpBase(size_t id, const framework::OpDesc& op_desc, - const platform::Place& place) - : id_(id), op_(framework::OpRegistry::CreateOp(op_desc)), place_(place) { - VLOG(3) << "Construct Op: " << op_desc.Type() << std::endl; + VLOG(3) << "Construct Op: " << type << std::endl; } void OpBase::CreateOperatorBase() { @@ -260,11 +252,7 @@ void 
OpBase::CreateOperatorBase() { if (info.Checker() != nullptr) { info.Checker()->Check(&attrs_); } - - auto input_name_map = CreateVarNameMap(info, type_, ins_, true); - auto output_name_map = CreateVarNameMap(info, type_, outs_, false); - op_ = framework::OpRegistry::CreateOp(type_, std::move(input_name_map), - std::move(output_name_map), attrs_); + op_ = framework::OpRegistry::CreateOp(type_, {}, {}, {}, false); } void OpBase::Run(const NameVarBaseMap& ins, const NameVarBaseMap& outs) { @@ -272,10 +260,9 @@ void OpBase::Run(const NameVarBaseMap& ins, const NameVarBaseMap& outs) { PADDLE_ENFORCE_NOT_NULL(op_kernel, "only support op with kernel"); auto& info = op_->Info(); if (info.infer_var_type_) { - RuntimeInferVarTypeContext infer_var_type_ctx(ins, &outs, op_->Attrs()); + RuntimeInferVarTypeContext infer_var_type_ctx(ins, &outs, attrs_); info.infer_var_type_(&infer_var_type_ctx); } - // Initialize output var type for (auto& var_pair : outs) { for (auto& var : var_pair.second) { @@ -285,13 +272,11 @@ void OpBase::Run(const NameVarBaseMap& ins, const NameVarBaseMap& outs) { VLOG(3) << "Running Op " << Type(); VLOG(5) << LayerDebugString(Type(), ins, outs); - auto runtime_ctx = PrepareRuntimeContext(ins, outs); - - VLOG(6) << "start preparing op: " << Type(); - auto prepared_op = PreparedOp::Prepare(runtime_ctx, *op_kernel, place(), ins); + framework::RuntimeContext runtime_ctx({}, {}); + auto prepared_op = + PreparedOp::Prepare(ins, outs, *op_kernel, place(), &attrs_); - VLOG(6) << "finish preparing op: " << Type(); - prepared_op.Run(); + prepared_op.Run(&ins, &outs, &attrs_); VLOG(4) << LayerDebugString(Type(), ins, outs); } diff --git a/paddle/fluid/imperative/layer.h b/paddle/fluid/imperative/layer.h index 2d8057910e09912ce7e3654c241f5d7a94c3527d..4bc7955c2506eee5e247e93682c4e8549f5c27ca 100644 --- a/paddle/fluid/imperative/layer.h +++ b/paddle/fluid/imperative/layer.h @@ -27,6 +27,10 @@ #include #include "paddle/fluid/framework/op_desc.h" #include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/framework/shape_inference.h" +#include "paddle/fluid/framework/type_defs.h" +#include "paddle/fluid/framework/var_desc.h" +#include "paddle/fluid/framework/var_type.h" #include "paddle/fluid/framework/var_type_inference.h" #include "paddle/fluid/framework/variable.h" #include "paddle/fluid/imperative/flags.h" @@ -209,6 +213,160 @@ class Layer { } }; +class DygraphExecutionContext : public framework::ExecutionContext { + using Variable = framework::Variable; + + public: + DygraphExecutionContext(const framework::OperatorBase& op, + const framework::Scope& scope, + const platform::DeviceContext& device_context, + const framework::RuntimeContext& ctx, + std::vector* configs, + const NameVarBaseMap& var_base_map_in, + const NameVarBaseMap& var_base_map_out, + const framework::AttributeMap* attrs) + : ExecutionContext(op, scope, device_context, ctx, configs), + var_base_map_in_(var_base_map_in), + var_base_map_out_(var_base_map_out), + attrs_(attrs) {} + + std::string InputName(const std::string& name) const { + auto it = var_base_map_in_.find(name); + PADDLE_ENFORCE_NE(it, var_base_map_in_.end(), + platform::errors::PreconditionNotMet( + "Can not find [%s] in Input", name)); + return it->second[0]->Name(); + } + std::vector InputNames(const std::string& name) const { + auto it = var_base_map_in_.find(name); + PADDLE_ENFORCE_NE( + it, var_base_map_in_.end(), + platform::errors::NotFound("Can not find [%s] in Input", name)); + std::vector vec_res; + vec_res.reserve(it->second.size()); + 
for (size_t i = 0; i < it->second.size(); ++i) { + vec_res.push_back(it->second[i]->Name()); + } + return vec_res; + } + + std::string OuputName(const std::string& name) const { + auto it = var_base_map_out_.find(name); + PADDLE_ENFORCE_NE( + it, var_base_map_out_.end(), + platform::errors::NotFound("Can not find [%s] in Output", name)); + return it->second[0]->Name(); + } + + std::vector OutputNames(const std::string& name) const { + auto it = var_base_map_out_.find(name); + PADDLE_ENFORCE_NE( + it, var_base_map_out_.end(), + platform::errors::NotFound("Can not find [%s] in Output", name)); + std::vector vec_res; + vec_res.reserve(it->second.size()); + for (size_t i = 0; i < it->second.size(); ++i) { + vec_res.push_back(it->second[i]->Name()); + } + return vec_res; + } + + bool HasAttr(const std::string& name) const { return attrs_->count(name); } + + const framework::AttributeMap& Attrs() const { return *attrs_; } + + const framework::Attribute& GetAttr(const std::string& name) const { + auto it = attrs_->find(name); + + PADDLE_ENFORCE_NE( + it, attrs_->end(), + platform::errors::NotFound("can not find [%s] in attrs", name)); + + return it->second; + } + + std::vector InNameList() const { + std::vector vec_temp; + vec_temp.reserve(var_base_map_in_.size()); + + for (auto& v : var_base_map_in_) { + vec_temp.push_back(v.first); + } + + return vec_temp; + } + bool HasInput(const std::string& name) const { + auto it = var_base_map_in_.find(name); + return (it != var_base_map_in_.end() && it->second.size() > 0); + } + + virtual bool HasOutput(const std::string& name) const { + auto it = var_base_map_out_.find(name); + return (it != var_base_map_out_.end() && it->second.size() > 0); + } + + size_t InputSize(const std::string& name) const { + return InputNames(name).size(); + } + + size_t OutputSize(const std::string& name) const { + return OutputNames(name).size(); + } + + const Variable* InputVar(const std::string& name) const override { + auto it = var_base_map_in_.find(name); + if (it == var_base_map_in_.end()) { + return nullptr; + } + + return it->second.empty() ? nullptr : it->second[0]->MutableVar(); + } + + Variable* OutputVar(const std::string& name) const { + auto it = var_base_map_out_.find(name); + if (it == var_base_map_out_.end()) { + return nullptr; + } + + return it->second.empty() ? 
nullptr : it->second[0]->MutableVar(); + } + + const std::vector MultiInputVar( + const std::string& name) const override { + auto it = var_base_map_in_.find(name); + if (it == var_base_map_in_.end()) { + return {}; + } + std::vector vec_res; + vec_res.reserve(it->second.size()); + for (size_t i = 0; i < it->second.size(); ++i) { + vec_res.push_back(it->second[i]->MutableVar()); + } + + return vec_res; + } + + std::vector MultiOutputVar( + const std::string& name) const override { + auto it = var_base_map_out_.find(name); + if (it == var_base_map_out_.end()) { + return {}; + } + std::vector vec_res; + vec_res.reserve(it->second.size()); + for (size_t i = 0; i < it->second.size(); ++i) { + vec_res.push_back(it->second[i]->MutableVar()); + } + + return vec_res; + } + + private: + const NameVarBaseMap& var_base_map_in_; + const NameVarBaseMap& var_base_map_out_; + const framework::AttributeMap* attrs_; +}; + // infer var type context for imperative mode class RuntimeInferVarTypeContext : public framework::InferVarTypeContext { public: @@ -377,7 +535,7 @@ class OpBase : public std::enable_shared_from_this { return op_->Outputs(); } - const framework::AttributeMap& Attrs() const { return op_->Attrs(); } + const framework::AttributeMap& Attrs() const { return attrs_; } const framework::OpInfo& Info() const { return op_->Info(); } void ClearBackwardTrace(); @@ -419,9 +577,6 @@ class OpBase : public std::enable_shared_from_this { const NameVarBaseMap& outs, const framework::AttributeMap& attrs, const platform::Place& place); - OpBase(size_t id, const framework::OpDesc& op_desc, - const platform::Place& place); - public: OpBase() {} @@ -485,5 +640,318 @@ class OpBase : public std::enable_shared_from_this { framework::AttributeMap attrs_; }; +class DygraphInferShapeContext : public framework::InferShapeContext { + using DDim = framework::DDim; + + public: + DygraphInferShapeContext(const NameVarBaseMap* in, const NameVarBaseMap* out, + const framework::AttributeMap* attr) + : var_base_map_in_(in), var_base_map_out_(out), attrs_(attr) {} + + bool HasInput(const std::string& name) const override { + // has only one input + auto it = var_base_map_in_->find(name); + + if (it == var_base_map_in_->end()) { + return false; + } + const auto& in = it->second; + if (in.size() == 0) return false; + PADDLE_ENFORCE_EQ( + in.size(), 1UL, + platform::errors::PreconditionNotMet( + "Input %s should not have more than one inputs", name)); + return in[0] != nullptr; + } + + bool HasOutput(const std::string& name) const override { + // has only one output + auto it = var_base_map_out_->find(name); + if (it == var_base_map_out_->end()) { + return false; + } + const auto& out = it->second; + if (out.size() == 0) { + return false; + } + PADDLE_ENFORCE_EQ( + out.size(), 1UL, + platform::errors::PreconditionNotMet( + "Output %s should not have more than one outputs", name)); + return out[0] != nullptr; + } + + bool HasInputs(const std::string& name) const override { + auto it = var_base_map_in_->find(name); + if (it == var_base_map_in_->end() || it->second.empty()) { + return false; + } + for (auto& input : it->second) { + if (input == nullptr) { + return false; + } + } + return true; + } + + bool HasOutputs(const std::string& name) const override { + auto it = var_base_map_out_->find(name); + if (it == var_base_map_out_->end() || it->second.empty()) { + return false; + } + for (auto& output : it->second) { + if (output == nullptr) { + return false; + } + } + return true; + } + + framework::AttrReader Attrs() const override 
{ + return framework::AttrReader(*attrs_); + } + + std::vector Inputs(const std::string& name) const override { + // return op_.Inputs(name); + std::vector vec_res; + auto it = var_base_map_in_->find(name); + PADDLE_ENFORCE_NE( + it, var_base_map_in_->end(), + platform::errors::NotFound("can not find [%s] in input", name)); + + vec_res.reserve(it->second.size()); + for (auto& var : it->second) { + vec_res.push_back(var->Name()); + } + + return vec_res; + } + + std::vector Outputs(const std::string& name) const override { + std::vector vec_res; + auto it = var_base_map_out_->find(name); + PADDLE_ENFORCE_NE( + it, var_base_map_out_->end(), + platform::errors::NotFound("can not find [%s] in output", name)); + + vec_res.reserve(it->second.size()); + for (auto& var : it->second) { + vec_res.push_back(var->Name()); + } + + return vec_res; + } + + void ShareDim(const std::string& in, const std::string& out, size_t i = 0, + size_t j = 0) override { + auto in_it = var_base_map_in_->find(in); + auto out_it = var_base_map_out_->find(out); + PADDLE_ENFORCE_NE( + in_it, var_base_map_in_->end(), + platform::errors::NotFound("can not found [%s] in input", in)); + PADDLE_ENFORCE_GT(in_it->second.size(), i, + platform::errors::PreconditionNotMet( + "Inputs %s should have %llu argument", in, i)); + PADDLE_ENFORCE_NE( + out_it, var_base_map_out_->end(), + platform::errors::NotFound("can not found [%s] in input", in)); + PADDLE_ENFORCE_GT(out_it->second.size(), j, + platform::errors::PreconditionNotMet( + "Outputs %s should have %llu argument", out, j)); + + framework::Variable* in_var = in_it->second[i]->MutableVar(); + framework::Variable* out_var = out_it->second[j]->MutableVar(); + + PADDLE_ENFORCE_EQ(in_var->Type(), out_var->Type(), + platform::errors::PreconditionNotMet( + "The type of %s and %s is not the same.", in, out)); + + auto& in_lod_tensor = in_var->Get(); + auto* out_lod_tensor = out_var->GetMutable(); + out_lod_tensor->Resize(in_lod_tensor.dims()); + } + + void ShareAllLoD(const std::string& in, + const std::string& out) const override { + // do nothing + } + void ShareLoD(const std::string& in, const std::string& out, size_t i = 0, + size_t j = 0) const override { + // do nothing + } + + bool IsRuntime() const override { return true; } + + // TODO(paddle-dev): Can this be template? 
+ std::vector GetInputVarPtrs( + const std::string& name) override { + PADDLE_THROW(platform::errors::PermissionDenied( + "GetInputVarPtrs not support in dygraph runtime context")); + } + + std::vector GetOutputVarPtrs( + const std::string& name) override { + PADDLE_THROW(platform::errors::PermissionDenied( + "GetOutputVarPtrs not support in dygraph runtime context")); + } + + DDim GetInputDim(const std::string& name) const override { + auto it = var_base_map_in_->find(name); + PADDLE_ENFORCE_NE( + it, var_base_map_in_->end(), + platform::errors::NotFound("can not find [%s] in input", name)); + PADDLE_ENFORCE_EQ( + it->second.size(), 1UL, + platform::errors::PreconditionNotMet( + "Input(%s) should hold one element, but now it holds %d", name, + it->second.size())); + return this->GetDim(it->second[0]->MutableVar()); + } + + std::vector GetInputsDim(const std::string& name) const override { + // const std::vector& vars = InputVars(name); + std::vector vec_res; + auto it = var_base_map_in_->find(name); + PADDLE_ENFORCE_NE( + it, var_base_map_in_->end(), + platform::errors::NotFound("can not find [%s] in output", name)); + vec_res.reserve(it->second.size()); + for (size_t i = 0; i < it->second.size(); ++i) { + vec_res.emplace_back(GetDim(it->second[i]->MutableVar())); + } + + return vec_res; + } + + std::vector GetInputsVarType( + const std::string& name) const override { + std::vector vec_res; + auto it = var_base_map_in_->find(name); + PADDLE_ENFORCE_NE( + it, var_base_map_in_->end(), + platform::errors::NotFound("can not find [%s] in input", name)); + vec_res.reserve(it->second.size()); + for (size_t i = 0; i < it->second.size(); ++i) { + vec_res.emplace_back( + framework::ToVarType(it->second[i]->MutableVar()->Type())); + } + return vec_res; + } + + std::vector GetOutputsVarType( + const std::string& name) const override { + std::vector vec_res; + auto it = var_base_map_out_->find(name); + PADDLE_ENFORCE_NE( + it, var_base_map_out_->end(), + platform::errors::NotFound("can not find [%s] in output", name)); + vec_res.reserve(it->second.size()); + for (size_t i = 0; i < it->second.size(); ++i) { + vec_res.emplace_back( + framework::ToVarType(it->second[i]->MutableVar()->Type())); + } + return vec_res; + } + + void SetOutputDim(const std::string& name, const DDim& dim) override { + auto it = var_base_map_out_->find(name); + PADDLE_ENFORCE_NE( + it, var_base_map_out_->end(), + platform::errors::NotFound("can not find [%s] in output", name)); + + SetDim(it->second[0]->MutableVar(), dim); + } + + void SetOutputsDim(const std::string& name, + const std::vector& dims) override { + // auto& vars = OutputVars(name); + // SetDims(vars, dims); + + auto it = var_base_map_out_->find(name); + PADDLE_ENFORCE_NE( + it, var_base_map_out_->end(), + platform::errors::NotFound("can not find [%s] in output", name)); + + PADDLE_ENFORCE_EQ(it->second.size(), dims.size(), + platform::errors::PreconditionNotMet( + "dim size [%d] is not match output var number [%d]", + dims.size(), it->second.size())); + + for (size_t i = 0; i < dims.size(); ++i) { + SetDim(it->second[i]->MutableVar(), dims[i]); + } + } + + int32_t GetLoDLevel(const std::string& in, size_t i = 0) const override { + PADDLE_THROW(platform::errors::PermissionDenied( + "GetLoDLevel function not support in dygraph mode")); + } + + void SetLoDLevel(const std::string& out, int32_t lod_level, + size_t j = 0) const override { + PADDLE_THROW(platform::errors::PermissionDenied( + "SetLoDLevel function not support in dygraph mode")); + } + + protected: + DDim 
GetDim(framework::Variable* var) const { + PADDLE_ENFORCE_NOT_NULL(var, platform::errors::PreconditionNotMet( + "Input variable should not be null")); + if (var->IsType()) { + return var->Get().dims(); + } else if (var->IsType()) { + return var->Get().GetCompleteDims(); + } else { + PADDLE_THROW(platform::errors::PermissionDenied( + "Only LoDTensor/SelectedRows support 'GetDim', but Variables " + "type_id is xx.")); + } + } + + std::vector GetRepeatedDims(const std::string& name) const override { + PADDLE_THROW(platform::errors::PermissionDenied( + "GetRepeatedDims not support in dygraph runtime")); + } + + void SetDim(framework::Variable* var, const DDim& dim) { + if (var->IsType()) { + var->GetMutable()->Resize(dim); + } else if (var->IsType()) { + var->GetMutable()->set_height(dim[0]); + } else { + PADDLE_THROW(platform::errors::PermissionDenied( + "Variable type_id %s, expect LoDTensor/SelectedRows.")); + } + } + + void SetDims(const std::vector& vars, + const std::vector& dims) { + size_t length = vars.size(); + PADDLE_ENFORCE_EQ( + length, dims.size(), + platform::errors::PreconditionNotMet( + "Vars number [%d] should be equal with dims number [%d]", length, + dims.size())); + for (size_t i = 0; i < length; ++i) { + if (vars[i] == nullptr) { + continue; + } + SetDim(vars[i], dims[i]); + } + } + + void SetRepeatedDims(const std::string& name, + const std::vector& dims) override { + PADDLE_THROW(platform::errors::PermissionDenied( + "SetRepeatedDims not support in dygraph runtime")); + } + + private: + const NameVarBaseMap* var_base_map_in_; + const NameVarBaseMap* var_base_map_out_; + std::string type_; + const framework::AttributeMap* attrs_; +}; + } // namespace imperative } // namespace paddle diff --git a/paddle/fluid/imperative/prepared_operator.cc b/paddle/fluid/imperative/prepared_operator.cc index b3ec5af445cc70979fbb3771a7df7a6b074c0ecb..8356ad0cb5575fbbc85065397349d27547b3c070 100644 --- a/paddle/fluid/imperative/prepared_operator.cc +++ b/paddle/fluid/imperative/prepared_operator.cc @@ -70,10 +70,11 @@ PreparedOp::PreparedOp(const framework::OperatorBase& op, dev_ctx_(dev_ctx), kernel_configs_(kernel_configs) {} -PreparedOp PreparedOp::Prepare(const framework::RuntimeContext& ctx, +PreparedOp PreparedOp::Prepare(const NameVarBaseMap& ins, + const NameVarBaseMap& outs, const framework::OperatorWithKernel& op, platform::Place place, - const NameVarBaseMap& ins) { + const framework::AttributeMap* attrs) { platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto* dev_ctx = pool.Get(place); @@ -88,9 +89,9 @@ PreparedOp PreparedOp::Prepare(const framework::RuntimeContext& ctx, auto& kernels = kernels_iter->second; - auto expected_kernel_key = - op.GetExpectedKernelType(framework::ExecutionContext( - op, framework::Scope(), *dev_ctx, ctx, nullptr)); + framework::RuntimeContext ctx({}, {}); + auto expected_kernel_key = op.GetExpectedKernelType(DygraphExecutionContext( + op, framework::Scope(), *dev_ctx, ctx, nullptr, ins, outs, attrs)); VLOG(3) << "expected_kernel_key:" << expected_kernel_key; auto kernel_iter = kernels.find(expected_kernel_key); @@ -111,13 +112,20 @@ PreparedOp PreparedOp::Prepare(const framework::RuntimeContext& ctx, return PreparedOp(op, ctx, kernel_iter->second, dev_ctx, kernel_configs); } -void PreparedOp::Run() { +void PreparedOp::Run(const NameVarBaseMap* in, const NameVarBaseMap* out, + const framework::AttributeMap* attrs) { // TODO(zjl): remove scope in dygraph framework::Scope scope; - op_.RuntimeInferShape(scope, 
dev_ctx_->GetPlace(), ctx_); - VLOG(6) << "Finish Runtime infer shape"; - func_(framework::ExecutionContext(op_, scope, *dev_ctx_, ctx_, - kernel_configs_)); + + DygraphInferShapeContext infer_shape_ctx(in, out, attrs); + + framework::OperatorWithKernel* op_ker = + (framework::OperatorWithKernel*)(&op_); + + op_ker->InferShape(&infer_shape_ctx); + + func_(DygraphExecutionContext(op_, scope, *dev_ctx_, ctx_, kernel_configs_, + *in, *out, attrs)); } } // namespace imperative diff --git a/paddle/fluid/imperative/prepared_operator.h b/paddle/fluid/imperative/prepared_operator.h index 886311f8c82fce4b3b1cd46bbe2ac6e5f22c50e5..41a18357b98e6759e108581d001149267fffbe3e 100644 --- a/paddle/fluid/imperative/prepared_operator.h +++ b/paddle/fluid/imperative/prepared_operator.h @@ -30,13 +30,16 @@ const framework::Tensor* GetTensorFromVar(const framework::Variable& var); class PreparedOp { public: - static PreparedOp Prepare(const framework::RuntimeContext& ctx, + static PreparedOp Prepare(const NameVarBaseMap& ins, + const NameVarBaseMap& outs, const framework::OperatorWithKernel& op, - platform::Place place, const NameVarBaseMap& ins); + platform::Place place, + const framework::AttributeMap* attrs); inline platform::DeviceContext* GetDeviceContext() const { return dev_ctx_; } - void Run(); + void Run(const NameVarBaseMap* in, const NameVarBaseMap* out, + const framework::AttributeMap* attrs); static void PrepareData(const platform::Place& place, const NameVarBaseMap& ins, diff --git a/paddle/fluid/imperative/tests/test_layer.cc b/paddle/fluid/imperative/tests/test_layer.cc index c92d0fd67c9c055d5b37a959ce0997c734b29a82..be7e6fe160d37453b5cbe14662f11e9f4b40cd92 100644 --- a/paddle/fluid/imperative/tests/test_layer.cc +++ b/paddle/fluid/imperative/tests/test_layer.cc @@ -148,6 +148,67 @@ TEST(test_layer, test_varbase_basic) { } // TODO(jiabin): Add more ut here for layer +TEST(test_layer, test_dygraph_execution_context) { + std::shared_ptr vin( + new imperative::VarBase(false, "vin")); + std::shared_ptr vout( + new imperative::VarBase(false, "vout")); + framework::OpDesc desc; + platform::CPUPlace place; + var_pair x_pair = var_pair("X", vb_vector(1, vin)); + var_pair y_pair = var_pair("Y", vb_vector(1, vin)); + var_pair out_pair = var_pair("Out", vb_vector(1, vout)); + imperative::NameVarBaseMap ins = {x_pair, y_pair}; + imperative::NameVarBaseMap outs = {out_pair}; + + framework::AttributeMap concat_att_map; + concat_att_map["axis"] = 1; + + auto op = framework::OpRegistry::CreateOp("mul", {}, {}, {}, false); + paddle::platform::CPUPlace cpu_place; + + paddle::platform::DeviceContextPool& pool = + paddle::platform::DeviceContextPool::Instance(); + auto* dev_ctx = pool.Get(cpu_place); + paddle::framework::RuntimeContext ctx({}, {}); + framework::Scope scope; + + DygraphExecutionContext dy_exe_context(*(op.get()), scope, *dev_ctx, ctx, + nullptr, ins, outs, &concat_att_map); + + ASSERT_EQ(dy_exe_context.InputSize("X"), 1u); + ASSERT_EQ(dy_exe_context.InputName("X"), "vin"); + ASSERT_EQ(dy_exe_context.HasAttr("axis"), true); + auto attr_map = dy_exe_context.Attrs(); + ASSERT_EQ(boost::get(attr_map["axis"]), 1); + ASSERT_EQ(dy_exe_context.OutputSize("Out"), 1u); + ASSERT_EQ(dy_exe_context.HasOutput("Out"), true); +} + +TEST(test_layer, test_dygraph_infershape_context) { + std::shared_ptr vin( + new imperative::VarBase(false, "vin")); + std::shared_ptr vout( + new imperative::VarBase(false, "vout")); + framework::OpDesc desc; + platform::CPUPlace place; + var_pair x_pair = var_pair("X", vb_vector(1, 
vin)); + var_pair y_pair = var_pair("Y", vb_vector(1, vin)); + var_pair out_pair = var_pair("Out", vb_vector(1, vout)); + imperative::NameVarBaseMap ins = {x_pair, y_pair}; + imperative::NameVarBaseMap outs = {out_pair}; + + framework::AttributeMap concat_att_map; + concat_att_map["axis"] = 1; + + DygraphInferShapeContext infer_shape_ctx(&ins, &outs, &concat_att_map); + + bool have_x = infer_shape_ctx.HasOutputs("Out"); + ASSERT_EQ(have_x, true); + bool have_z = infer_shape_ctx.HasOutputs("Z"); + ASSERT_EQ(have_z, false); +} + } // namespace imperative } // namespace paddle diff --git a/paddle/fluid/imperative/tests/test_prepare_op.cc b/paddle/fluid/imperative/tests/test_prepare_op.cc index 1a30868da041eb0c7dc2d7ed9308871f231f5ab9..48065eafa71089cf48bcc86a84e3658c08e5254e 100644 --- a/paddle/fluid/imperative/tests/test_prepare_op.cc +++ b/paddle/fluid/imperative/tests/test_prepare_op.cc @@ -110,8 +110,8 @@ TEST(test_prepare_op, test_prepare_op) { framework::OperatorWithKernel op("split", var_in_map, var_out_map, split_attr_map); framework::RuntimeContext ctx = PrepareRuntimeContext(ins, outs); - ASSERT_NO_FATAL_FAILURE(PreparedOp preparedOp = - PreparedOp::Prepare(ctx, op, place, ins)); + ASSERT_NO_FATAL_FAILURE(PreparedOp preparedOp = PreparedOp::Prepare( + ins, outs, op, place, &split_attr_map)); } const framework::Tensor* GetTensorFromVar(const framework::Variable& var); @@ -158,7 +158,8 @@ TEST(test_prepare_op, test_prepare_data) { framework::RuntimeContext ctx = PrepareRuntimeContext(ins, outs); // test if it can be transformed to GPU place - PreparedOp prepared_op = PreparedOp::Prepare(ctx, assign_op, gpu_place, ins); + PreparedOp prepared_op = + PreparedOp::Prepare(ins, outs, assign_op, gpu_place, &assign_attr_map); for (const auto& name_pair : ins) { for (const auto& vb : name_pair.second) { ASSERT_TRUE(platform::is_same_place( @@ -201,7 +202,8 @@ TEST(test_prepare_op, test_prepare_data_same_place) { framework::RuntimeContext ctx = PrepareRuntimeContext(ins, outs); // test if it never transfered on GPU place - PreparedOp prepared_op = PreparedOp::Prepare(ctx, assign_op, cpu_place, ins); + PreparedOp prepared_op = + PreparedOp::Prepare(ins, outs, assign_op, cpu_place, &assign_attr_map); for (const auto& name_pair : ins) { for (const auto& vb : name_pair.second) { ASSERT_TRUE(platform::is_same_place( diff --git a/paddle/fluid/imperative/tracer.cc b/paddle/fluid/imperative/tracer.cc index 536aeb3c4741d04512b9fd7e430030e592b27b7a..9ffb941482024f8d1b94835fb42599a6f27cb43e 100644 --- a/paddle/fluid/imperative/tracer.cc +++ b/paddle/fluid/imperative/tracer.cc @@ -82,10 +82,9 @@ static void PassStopGradient(const NameVarBaseMap& outs, bool generate_grad) { void Tracer::TraceOp(const std::string& type, const NameVarBaseMap& ins, const NameVarBaseMap& outs, framework::AttributeMap attrs, const platform::Place& place, bool trace_backward) { - platform::RecordEvent event(type); VLOG(1) << "Trace Op: " << type; size_t op_id = GenerateUniqueId(); - auto op = OpBase::Create(op_id, type, ins, outs, std::move(attrs), place); + auto op = OpBase::Create(op_id, type, ins, outs, attrs, place); op->Run(ins, outs); if (enable_program_desc_tracing_) { diff --git a/paddle/fluid/operators/activation_op.h b/paddle/fluid/operators/activation_op.h index 9f0203b2149c86e7dd475ffe1df9fd07037a9f73..b94da368ce41d6c20fde287aba64ab086113756d 100644 --- a/paddle/fluid/operators/activation_op.h +++ b/paddle/fluid/operators/activation_op.h @@ -62,11 +62,11 @@ inline void ExtractActivationTensor(const 
framework::ExecutionContext& context, auto out_var = context.OutputVar("Out"); PADDLE_ENFORCE(x_var != nullptr, "Cannot get input Variable X, variable name = %s", - context.op().Input("X")); + context.InputName("X")); PADDLE_ENFORCE(out_var != nullptr, "Cannot get output Variable Out, variable name = %s", - context.op().Output("Out")); - if (CanBeUsedBySelectedRows.count(context.op().Type())) { + context.OutputName("Out")); + if (CanBeUsedBySelectedRows.count(context.Type())) { *X = paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(*x_var); *Out = paddle::framework::GetMutableLoDTensorOrSelectedRowsValueFromVar( out_var); @@ -77,7 +77,7 @@ inline void ExtractActivationTensor(const framework::ExecutionContext& context, PADDLE_ENFORCE(*Out != nullptr, "Cannot get output tensor Out, variable name = %s", - context.op().Output("Out")); + context.OutputName("Out")); } template @@ -93,18 +93,18 @@ inline void ExtractActivationGradTensor( out_var = context.InputVar("Out"); PADDLE_ENFORCE(out_var != nullptr, "Cannot get input Variable Out, variable name = %s", - context.op().Input("Out")); + context.InputName("Out")); } PADDLE_ENFORCE(out_grad_var != nullptr, "Cannot get input Variable %s, variable name = %s", framework::GradVarName("Out"), - context.op().Input(framework::GradVarName("Out"))); + context.InputName(framework::GradVarName("Out"))); PADDLE_ENFORCE(x_grad_var != nullptr, "Cannot get output Variable %s, variable name = %s", framework::GradVarName("X"), - context.op().Output(framework::GradVarName("X"))); + context.OutputName(framework::GradVarName("X"))); - if (CanBeUsedBySelectedRows.count(context.op().Type())) { + if (CanBeUsedBySelectedRows.count(context.Type())) { *dOut = paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar( *out_grad_var); *dX = paddle::framework::GetMutableLoDTensorOrSelectedRowsValueFromVar( @@ -132,20 +132,20 @@ inline void ExtractActivationGradTensor( PADDLE_ENFORCE(*dX != nullptr, "Cannot get output tensor %s, variable name = %s", framework::GradVarName("X"), - context.op().Output(framework::GradVarName("X"))); + context.OutputName(framework::GradVarName("X"))); if (static_cast(kDepValue) & static_cast(kDepX)) { auto x_var = context.InputVar("X"); PADDLE_ENFORCE(x_var != nullptr, "Cannot get input tensor X, variable name = %s", - context.op().Input("X")); - if (CanBeUsedBySelectedRows.count(context.op().Type())) { + context.InputName("X")); + if (CanBeUsedBySelectedRows.count(context.Type())) { *X = paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(*x_var); } else { *X = context.Input("X"); } } else { - VLOG(10) << " Inplace activation of Op : " << context.op().Type(); + VLOG(10) << " Inplace activation of Op : " << context.Type(); *X = *dX; } } @@ -1273,8 +1273,8 @@ inline void ExtractActivationDoubleGradTensor( auto ddo_var = ctx.OutputVar("DDOut"); PADDLE_ENFORCE(ddx_var != nullptr, "Cannot get input Variable Out, variable name = %s", - ctx.op().Input("DDX")); - if (CanBeUsedBySelectedRows.count(ctx.op().Type())) { + ctx.InputName("DDX")); + if (CanBeUsedBySelectedRows.count(ctx.Type())) { *ddX = paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(*ddx_var); if (ddo_var) { *ddOut = paddle::framework::GetMutableLoDTensorOrSelectedRowsValueFromVar( @@ -1288,15 +1288,15 @@ inline void ExtractActivationDoubleGradTensor( } PADDLE_ENFORCE(*ddX != nullptr, "Cannot get output tensor DDX, variable name = %s", - ctx.op().Output("DDX")); + ctx.OutputName("DDX")); if (static_cast(kDepValue) & static_cast(kDepX)) { auto x_var = 
ctx.InputVar("X"); PADDLE_ENFORCE(x_var != nullptr, "Cannot get input Variable Out, variable name = %s", - ctx.op().Input("X")); + ctx.InputName("X")); auto dx_var = ctx.OutputVar("DX"); - if (CanBeUsedBySelectedRows.count(ctx.op().Type())) { + if (CanBeUsedBySelectedRows.count(ctx.Type())) { *X = paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(*x_var); if (dx_var) { *dX = paddle::framework::GetMutableLoDTensorOrSelectedRowsValueFromVar( @@ -1309,16 +1309,16 @@ inline void ExtractActivationDoubleGradTensor( } } } else { - VLOG(10) << "Inplace activation of Op: " << ctx.op().Type(); + VLOG(10) << "Inplace activation of Op: " << ctx.Type(); *X = *ddX; } if (static_cast(kDepValue) & static_cast(kDepOut)) { auto out_var = ctx.InputVar("Out"); PADDLE_ENFORCE(out_var != nullptr, "Cannot get input tensor Out, variable name = %s", - ctx.op().Input("Out")); + ctx.InputName("Out")); auto dout_var = ctx.OutputVar("DOut"); - if (CanBeUsedBySelectedRows.count(ctx.op().Type())) { + if (CanBeUsedBySelectedRows.count(ctx.Type())) { *Out = paddle::framework::GetLoDTensorOrSelectedRowsValueFromVar(*out_var); if (dout_var) { @@ -1333,7 +1333,7 @@ inline void ExtractActivationDoubleGradTensor( } } } else { - VLOG(10) << "Inplace activation of Op: " << ctx.op().Type(); + VLOG(10) << "Inplace activation of Op: " << ctx.Type(); *Out = *ddX; } } @@ -1471,20 +1471,20 @@ inline void ExtractDoubleGradTensorWithInputDOut( auto ddo_var = ctx.OutputVar("DDOut"); PADDLE_ENFORCE(ddx_var != nullptr, "Cannot get input Variable Out, variable name = %s", - ctx.op().Input("DDX")); + ctx.InputName("DDX")); *ddX = ctx.Input("DDX"); if (ddo_var) { *ddOut = ctx.Output("DDOut"); } PADDLE_ENFORCE(*ddX != nullptr, "Cannot get output tensor DDX, variable name = %s", - ctx.op().Output("DDX")); + ctx.OutputName("DDX")); // extract x(input), dx(output) auto x_var = ctx.InputVar("X"); PADDLE_ENFORCE(x_var != nullptr, "Cannot get input Variable Out, variable name = %s", - ctx.op().Input("X")); + ctx.InputName("X")); auto dx_var = ctx.OutputVar("DX"); *X = ctx.Input("X"); if (dx_var) { @@ -1537,20 +1537,20 @@ class SqrtDoubleGradKernel auto ddo_var = ctx.OutputVar("DDOut"); PADDLE_ENFORCE(ddx_var != nullptr, "Cannot get input Variable DDX, variable name = %s", - ctx.op().Input("DDX")); + ctx.InputName("DDX")); ddX = ctx.Input("DDX"); if (ddo_var) { ddOut = ctx.Output("DDOut"); } PADDLE_ENFORCE(ddX != nullptr, "Cannot get input Variable DDX, variable name = %s", - ctx.op().Input("DDX")); + ctx.InputName("DDX")); // extract out(input), dout(output) auto out_var = ctx.InputVar("Out"); PADDLE_ENFORCE(out_var != nullptr, "Cannot get input Variable Out, variable name = %s", - ctx.op().Input("Out")); + ctx.InputName("Out")); auto dout_var = ctx.OutputVar("DOut"); Out = ctx.Input("Out"); if (dout_var) { @@ -1561,7 +1561,7 @@ class SqrtDoubleGradKernel auto dx_var = ctx.InputVar("DX"); PADDLE_ENFORCE(dx_var != nullptr, "Cannot get input Variable DX, variable name = %s", - ctx.op().Input("DX")); + ctx.InputName("DX")); if (dx_var) { dX = ctx.Input("DX"); } diff --git a/paddle/fluid/operators/coalesce_tensor_op.cc b/paddle/fluid/operators/coalesce_tensor_op.cc index 886345919bc2d1a859e068001fd1586029a720f6..94a446a1c43c0745ef98d86fc8b2e115b9a19732 100644 --- a/paddle/fluid/operators/coalesce_tensor_op.cc +++ b/paddle/fluid/operators/coalesce_tensor_op.cc @@ -27,8 +27,8 @@ template class CoalesceTensorOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { - auto &in_var_names 
= context.Inputs("Input"); - auto &out_var_names = context.Outputs("Output"); + auto in_var_names = context.InputNames("Input"); + auto out_var_names = context.OutputNames("Output"); auto &in_vars = context.MultiInputVar("Input"); auto out_vars = context.MultiOutputVar("Output"); diff --git a/paddle/fluid/operators/concat_op.cc b/paddle/fluid/operators/concat_op.cc index ceb25fad2c2f48d3bfc8618d4f63332a1fcfc514..7d5667a3d58c61d11eb6acfbd49da12a8a56e009 100644 --- a/paddle/fluid/operators/concat_op.cc +++ b/paddle/fluid/operators/concat_op.cc @@ -32,6 +32,7 @@ class ConcatOp : public framework::OperatorWithKernel { void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE_GE(ctx->Inputs("X").size(), 1UL, "Inputs(X) of ConcatOp should not be empty."); + PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true, "Output(Out) of ConcatOp should not be null."); @@ -152,17 +153,8 @@ class ConcatOpGrad : public framework::OperatorWithKernel { auto in_x = "X"; auto out_x_g_n = framework::GradVarName(in_x); ctx->SetOutputsDim(out_x_g_n, ctx->GetInputsDim(in_x)); - auto &in_names = ctx->Inputs(in_x); - auto &out_names = ctx->Outputs(out_x_g_n); - PADDLE_ENFORCE_EQ( - in_names.size(), out_names.size(), - "The number of arguments in %s[%d] and %s[%d] is not equal.", in_x, - in_names.size(), out_x_g_n, out_names.size()); - for (size_t i = 0; i < in_names.size(); ++i) { - if (out_names[i] != framework::kEmptyVarName) { - ctx->ShareLoD(in_x, out_x_g_n, i, i); - } - } + + ctx->ShareAllLoD(in_x, out_x_g_n); } protected: @@ -197,7 +189,9 @@ class ConcatGradOpMaker : public framework::SingleGradOpMaker { std::unique_ptr op(new T()); op->SetType("concat_grad"); op->SetInput("X", this->Input("X")); - op->SetInput("AxisTensor", this->Input("AxisTensor")); + if (this->HasInput("AxisTensor")) { + op->SetInput("AxisTensor", this->Input("AxisTensor")); + } op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out")); op->SetOutput(framework::GradVarName("X"), this->InputGrad("X", false)); op->SetAttrMap(this->Attrs()); diff --git a/paddle/fluid/operators/concat_op.h b/paddle/fluid/operators/concat_op.h index 12e383b1de7a3532a366faa703ebc31118b58c36..7c0fe3b635aef28efb4c8a365dba570b89b5d397 100644 --- a/paddle/fluid/operators/concat_op.h +++ b/paddle/fluid/operators/concat_op.h @@ -139,7 +139,7 @@ class ConcatGradKernel : public framework::OpKernel { auto* out_grad = ctx.Input(framework::GradVarName("Out")); auto ins = ctx.MultiInput("X"); - auto out_var_names = ctx.Outputs(framework::GradVarName("X")); + auto out_var_names = ctx.OutputNames(framework::GradVarName("X")); auto outs = ctx.MultiOutput(framework::GradVarName("X")); diff --git a/paddle/fluid/operators/conv_op.h b/paddle/fluid/operators/conv_op.h index 5500e6b0d79bcf833e4aae55595efe6caca6d0c6..caf3a9a4bc71c07352598aa5f4ba71c3ca578d39 100644 --- a/paddle/fluid/operators/conv_op.h +++ b/paddle/fluid/operators/conv_op.h @@ -665,7 +665,7 @@ class GemmConvDoubleGradKernel : public framework::OpKernel { Tensor* dX = ctx.Output("DInput"); Tensor W = detail::Ref(ctx.Input("Filter"), "Cannot find input Filter(%s) in scope)", - ctx.Inputs("Filter")[0]); + ctx.InputNames("Filter")[0]); if (!ddY && !dW && !dX) return; const int groups = ctx.Attr("groups"); diff --git a/paddle/fluid/operators/cudnn_lstm_op.cu.cc b/paddle/fluid/operators/cudnn_lstm_op.cu.cc index 1bf41ed948b5bd4fbd49587f072f5debfa81d77c..f0f9e8842d4c3d58d20455e1ff2377069bf5dbfc 100644 --- a/paddle/fluid/operators/cudnn_lstm_op.cu.cc +++ b/paddle/fluid/operators/cudnn_lstm_op.cu.cc 
@@ -62,7 +62,7 @@ class CudnnLSTMGPUKernel : public framework::OpKernel { // multi-devices before the first running. // use parent scope to make cache persistable auto *scope = const_cast(ctx.scope().parent()); - auto cache_var_name = ctx.Inputs("Cache")[0]; + auto cache_var_name = ctx.InputNames("Cache")[0]; cache_var = scope->Var(cache_var_name); } CudnnRNNCache *cudnn_rnn_cache = nullptr; diff --git a/paddle/fluid/operators/cum_op.h b/paddle/fluid/operators/cum_op.h index 7c0fda4169b5e1cf663d04b78b6425d73965c292..d158bd4dfe55eaeb23718a6596c4f80d018ac7b1 100644 --- a/paddle/fluid/operators/cum_op.h +++ b/paddle/fluid/operators/cum_op.h @@ -31,11 +31,11 @@ class CumKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& context) const override { auto& X = detail::Ref(context.Input("X"), "Cannot get input tensor X, variable name = %s", - context.op().Input("X")); + context.InputName("X")); auto& Out = detail::Ref(context.Output("Out"), "Cannot get output tensor Out, variable name = %s", - context.op().Output("Out")); + context.OutputName("Out")); int axis = context.Attr("axis"); bool exclusive = context.Attr("exclusive"); bool reverse = context.Attr("reverse"); diff --git a/paddle/fluid/operators/detection/generate_proposals_op.cc b/paddle/fluid/operators/detection/generate_proposals_op.cc index 1a9b62f4f6afd2e108e4c8927d5817d451eccc11..6d2312d3b6aa378c891c8f26d78d2362666dcf2d 100644 --- a/paddle/fluid/operators/detection/generate_proposals_op.cc +++ b/paddle/fluid/operators/detection/generate_proposals_op.cc @@ -295,10 +295,10 @@ class GenerateProposalsKernel : public framework::OpKernel { auto *im_info = context.Input("ImInfo"); auto anchors = detail::Ref(context.Input("Anchors"), "Cannot find input Anchors(%s) in scope", - context.Inputs("Anchors")[0]); + context.InputNames("Anchors")[0]); auto variances = detail::Ref(context.Input("Variances"), "Cannot find input Variances(%s) in scope", - context.Inputs("Variances")[0]); + context.InputNames("Variances")[0]); auto *rpn_rois = context.Output("RpnRois"); auto *rpn_roi_probs = context.Output("RpnRoiProbs"); diff --git a/paddle/fluid/operators/detection/generate_proposals_op.cu b/paddle/fluid/operators/detection/generate_proposals_op.cu index 43deb5f9f3871b69ca46b7908c56c1236c1c5595..9a25e205bc81b8f58ec77c283895d1c51b30927d 100644 --- a/paddle/fluid/operators/detection/generate_proposals_op.cu +++ b/paddle/fluid/operators/detection/generate_proposals_op.cu @@ -369,10 +369,10 @@ class CUDAGenerateProposalsKernel : public framework::OpKernel { auto *im_info = context.Input("ImInfo"); auto anchors = detail::Ref(context.Input("Anchors"), "Cannot find input Anchors(%s) in scope", - context.Inputs("Anchors")[0]); + context.InputNames("Anchors")[0]); auto variances = detail::Ref(context.Input("Variances"), "Cannot find input Variances(%s) in scope", - context.Inputs("Variances")[0]); + context.InputNames("Variances")[0]); auto *rpn_rois = context.Output("RpnRois"); auto *rpn_roi_probs = context.Output("RpnRoiProbs"); diff --git a/paddle/fluid/operators/distributed_ops/broadcast_op.cu.cc b/paddle/fluid/operators/distributed_ops/broadcast_op.cu.cc index c9b40e6863f444983999e9d74a9efe288465fe27..5b1f917cc52fd49bfdce8f2b18989f3178a14be3 100644 --- a/paddle/fluid/operators/distributed_ops/broadcast_op.cu.cc +++ b/paddle/fluid/operators/distributed_ops/broadcast_op.cu.cc @@ -59,7 +59,7 @@ class NCCLBroadcastOpKernel : public framework::OpKernel { send_recv_buffer, static_cast(in->numel()), 
platform::ToNCCLDataType(in->type()), root_dev_id, comm, stream)); - VLOG(3) << "Bcast " << ctx.Inputs("X")[0] << ", (" << in->numel() << ")" + VLOG(3) << "Bcast " << ctx.InputNames("X")[0] << ", (" << in->numel() << ")" << " From " << root_dev_id << " to " << dev_id; if (ctx.Attr("sync_mode")) { diff --git a/paddle/fluid/operators/distributed_ops/distributed_lookup_table_op.cc b/paddle/fluid/operators/distributed_ops/distributed_lookup_table_op.cc index c34fb7b96f2377a8ca12f3488efa823ac012a5e0..403347d334b274c105b3f6c0e19c1150de824c85 100644 --- a/paddle/fluid/operators/distributed_ops/distributed_lookup_table_op.cc +++ b/paddle/fluid/operators/distributed_ops/distributed_lookup_table_op.cc @@ -84,9 +84,9 @@ class DistributedLookupTableKernel : public framework::OpKernel { auto ids_vars = context.MultiInputVar("Ids"); auto emb_vars = context.MultiOutput("Embeddings"); - auto id_names = context.Inputs("Ids"); - auto embedding_name = context.Inputs("W").front(); - auto out_names = context.Outputs("Outputs"); + auto id_names = context.InputNames("Ids"); + auto embedding_name = context.InputNames("W").front(); + auto out_names = context.OutputNames("Outputs"); auto lookup_tables = context.Attr>("table_names"); auto height_sections = diff --git a/paddle/fluid/operators/distributed_ops/split_ids_op.h b/paddle/fluid/operators/distributed_ops/split_ids_op.h index 6676ecd1c85d70cd5961af2fb1537e77b10e41bc..8a75dd8062359a4e2fdbbeffb24c3b92d71b87bd 100644 --- a/paddle/fluid/operators/distributed_ops/split_ids_op.h +++ b/paddle/fluid/operators/distributed_ops/split_ids_op.h @@ -116,7 +116,7 @@ class SplitIdsOpKernel : public framework::OpKernel { } else { PADDLE_THROW( "% should be LoDTensor or SelectedRows, but the received type is %s", - ctx.Inputs("Ids")[0], framework::ToTypeName(ids_var->Type())); + ctx.InputNames("Ids")[0], framework::ToTypeName(ids_var->Type())); } } }; diff --git a/paddle/fluid/operators/elementwise/elementwise_mul_op.h b/paddle/fluid/operators/elementwise/elementwise_mul_op.h index e41ee0b741793b36349d5f1484ef150d68905251..586a8c1d87aa342ce9c6f318c420c4166ff2c7d6 100644 --- a/paddle/fluid/operators/elementwise/elementwise_mul_op.h +++ b/paddle/fluid/operators/elementwise/elementwise_mul_op.h @@ -94,7 +94,7 @@ class ElementwiseMulKernel : public framework::OpKernel { auto x_var = ctx.InputVar("X"); PADDLE_ENFORCE(x_var != nullptr, "Cannot get input Variable X, variable name = %s", - ctx.op().Input("X")); + ctx.InputName("X")); auto* y = ctx.Input("Y"); framework::Tensor x, *z; diff --git a/paddle/fluid/operators/elementwise/elementwise_pow_op.h b/paddle/fluid/operators/elementwise/elementwise_pow_op.h index 1363485ced4e12bd7c67c04037ea3a2cd27b0e54..345eb2539523f623d00ee37fae3d4929fd5ba55a 100644 --- a/paddle/fluid/operators/elementwise/elementwise_pow_op.h +++ b/paddle/fluid/operators/elementwise/elementwise_pow_op.h @@ -31,7 +31,7 @@ class ElementwisePowKernel : public framework::OpKernel { auto* x = ctx.Input("X"); PADDLE_ENFORCE(x != nullptr, "Cannot get input Variable X, variable name = %s", - ctx.op().Input("X")); + ctx.InputName("X")); auto* y = ctx.Input("Y"); auto* z = ctx.Output("Out"); z->mutable_data(ctx.GetPlace()); diff --git a/paddle/fluid/operators/elementwise/mkldnn/elementwise_add_mkldnn_op.cc b/paddle/fluid/operators/elementwise/mkldnn/elementwise_add_mkldnn_op.cc index b0037925c1b57895e0d7773762e1b19e7488b872..2fd38e7c95fb1ca3049be6acbaac7ff4a153a9c0 100644 --- a/paddle/fluid/operators/elementwise/mkldnn/elementwise_add_mkldnn_op.cc +++ 
b/paddle/fluid/operators/elementwise/mkldnn/elementwise_add_mkldnn_op.cc @@ -138,7 +138,7 @@ class EltwiseAddMKLDNNKernel : public framework::OpKernel { std::vector scales = {1.0f, 1.0f}; const std::string key = - platform::CreateKey(src_x_tz, ctx.op().Output("Out")); + platform::CreateKey(src_x_tz, ctx.OutputName("Out")); platform::SumMKLDNNHandler handler(dev_ctx, mkldnn_engine, key); diff --git a/paddle/fluid/operators/fill_constant_op.h b/paddle/fluid/operators/fill_constant_op.h index a972ff21173b224f4d9fa7a53d72855a87642ab0..6213565ea74e1f23dbba6edde70b03c64a74e7d2 100644 --- a/paddle/fluid/operators/fill_constant_op.h +++ b/paddle/fluid/operators/fill_constant_op.h @@ -76,6 +76,7 @@ class FillConstantKernel : public framework::OpKernel { void Compute(const paddle::framework::ExecutionContext &ctx) const override { auto data_type = static_cast(ctx.Attr("dtype")); + auto str_value = ctx.Attr("str_value"); auto float_value = ctx.Attr("value"); auto force_cpu = ctx.Attr("force_cpu"); diff --git a/paddle/fluid/operators/fill_op.h b/paddle/fluid/operators/fill_op.h index fa2d5b858d95bcafbdcbf975dea1e183444bf118..99700736c1b53aeb1595622df1931539d482c215 100644 --- a/paddle/fluid/operators/fill_op.h +++ b/paddle/fluid/operators/fill_op.h @@ -47,7 +47,7 @@ class FillKernel : public framework::OpKernel { auto &out = detail::Ref(ctx.Output("Out"), "Cannot get output lod tensor Out, variable name = %s", - ctx.op().Output("Out")); + ctx.OutputName("Out")); out.Resize(framework::make_ddim(ctx.Attr>("shape"))); auto dtype = static_cast(ctx.Attr("dtype")); diff --git a/paddle/fluid/operators/fused/fused_elemwise_activation_op.h b/paddle/fluid/operators/fused/fused_elemwise_activation_op.h index 7cb753211eab328680ed78c9f3aa5409f487dc41..62a6175e33fe063bd6e5efdd5e123b745770c1fb 100644 --- a/paddle/fluid/operators/fused/fused_elemwise_activation_op.h +++ b/paddle/fluid/operators/fused/fused_elemwise_activation_op.h @@ -385,10 +385,10 @@ class FusedElemwiseActivationKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext &ctx) const override { auto &in_x = detail::Ref(ctx.Input("X"), "Cannot get input tensor %s, variable name = %s", - "X", ctx.op().Input("X")); + "X", ctx.InputName("X")); auto &in_y = detail::Ref(ctx.Input("Y"), "Cannot get input tensor %s, variable name = %s", - "Y", ctx.op().Input("Y")); + "Y", ctx.InputName("Y")); PADDLE_ENFORCE(ctx.HasOutput("Out"), "The output(Out) should not be empty"); auto output = ctx.Output("Out"); diff --git a/paddle/fluid/operators/hierarchical_sigmoid_op.h b/paddle/fluid/operators/hierarchical_sigmoid_op.h index d20a7e96b105079b75d2cd8ab7e41a6abbb77258..daf99586f825c53704088d718773a220d434b7a9 100644 --- a/paddle/fluid/operators/hierarchical_sigmoid_op.h +++ b/paddle/fluid/operators/hierarchical_sigmoid_op.h @@ -97,7 +97,7 @@ class HierarchicalSigmoidOpKernel : public framework::OpKernel { #ifdef PADDLE_WITH_DISTRIBUTE // w_Out is set to used by prefetch, never change it in other cases - auto weight = ctx.Outputs("W_Out").front(); + auto weight = ctx.OutputNames("W_Out").front(); operators::distributed::prefetch("Ids@Prefetch", "W@Prefetch", weight, true, table_names, epmap, height_sections, ctx, local_scope); diff --git a/paddle/fluid/operators/huber_loss_op.h b/paddle/fluid/operators/huber_loss_op.h index 7000b5d3acc07d722c0129f86f0cfa2ee65b4288..93cfba196468449d1961c769afa5fe33090bdae7 100644 --- a/paddle/fluid/operators/huber_loss_op.h +++ b/paddle/fluid/operators/huber_loss_op.h @@ -94,7 +94,7 @@ class HuberLossGradKernel 
: public framework::OpKernel { auto* in1 = context.Input(framework::GradVarName("Out")); auto* out0 = context.Output(framework::GradVarName("X")); auto* out1 = context.Output(framework::GradVarName("Y")); - auto delta = static_cast(context.op().Attr("delta")); + auto delta = static_cast(context.Attr("delta")); auto& place = *context.template device_context().eigen_device(); diff --git a/paddle/fluid/operators/load_combine_op.h b/paddle/fluid/operators/load_combine_op.h index 9f6565ddf3aa71db4e7504e91f72c852984a39b7..59c46508c14b7ec1676d02b53c670d8d29c581df 100644 --- a/paddle/fluid/operators/load_combine_op.h +++ b/paddle/fluid/operators/load_combine_op.h @@ -33,7 +33,7 @@ class LoadCombineOpKernel : public framework::OpKernel { auto filename = ctx.Attr("file_path"); auto load_as_fp16 = ctx.Attr("load_as_fp16"); auto model_from_memory = ctx.Attr("model_from_memory"); - auto &out_var_names = ctx.Outputs("Out"); + auto out_var_names = ctx.OutputNames("Out"); PADDLE_ENFORCE_GT( static_cast(out_var_names.size()), 0, diff --git a/paddle/fluid/operators/load_op.h b/paddle/fluid/operators/load_op.h index 3bf3c6bed2f0ddf352a2bad65b0d710097016b28..7347fd5e05481156cf7bd84c55b9c6f55b175bce 100644 --- a/paddle/fluid/operators/load_op.h +++ b/paddle/fluid/operators/load_op.h @@ -36,7 +36,7 @@ class LoadOpKernel : public framework::OpKernel { PADDLE_ENFORCE(static_cast(fin), "Cannot open file %s for load op", filename); - auto out_var_name = ctx.Outputs("Out").data(); + auto out_var_name = ctx.OutputNames("Out").data(); auto *out_var = ctx.OutputVar("Out"); PADDLE_ENFORCE(out_var != nullptr, "Output variable %s cannot be found ", diff --git a/paddle/fluid/operators/lookup_table_op.cu b/paddle/fluid/operators/lookup_table_op.cu index 43e3457fd5d7e74a70cb6efee3e6a3b8c51a53cb..c62f7356a91cb043064367f1fdc8d5f5d3719198 100644 --- a/paddle/fluid/operators/lookup_table_op.cu +++ b/paddle/fluid/operators/lookup_table_op.cu @@ -94,8 +94,8 @@ class LookupTableCUDAKernel : public framework::OpKernel { auto *output_t = context.Output("Out"); int64_t padding_idx = context.Attr("padding_idx"); - auto id_name = context.Inputs("Ids").front(); - auto out_name = context.Outputs("Out").front(); + auto id_name = context.InputNames("Ids").front(); + auto out_name = context.OutputNames("Out").front(); size_t N = table_t->dims()[0]; size_t D = table_t->dims()[1]; diff --git a/paddle/fluid/operators/lookup_table_op.h b/paddle/fluid/operators/lookup_table_op.h index 348fa52f38c3cc15ba1602b68485d987413ad609..e56988b83aea12d13c95f210730ffee4beeb2769 100644 --- a/paddle/fluid/operators/lookup_table_op.h +++ b/paddle/fluid/operators/lookup_table_op.h @@ -45,9 +45,9 @@ class LookupTableKernel : public framework::OpKernel { auto *output_t = context.Output("Out"); // float tensor auto *table_var = context.InputVar("W"); - auto id_name = context.Inputs("Ids").front(); - auto embedding_name = context.Inputs("W").front(); - auto out_name = context.Outputs("Out").front(); + auto id_name = context.InputNames("Ids").front(); + auto embedding_name = context.InputNames("W").front(); + auto out_name = context.OutputNames("Out").front(); // for remote prefetch auto epmap = context.Attr>("epmap"); diff --git a/paddle/fluid/operators/lookup_table_v2_op.cu b/paddle/fluid/operators/lookup_table_v2_op.cu index 8900a4af88cae2167a787325bb06df626610abaa..be41473dacf8aaee4503841927b5ab950e1f9afc 100644 --- a/paddle/fluid/operators/lookup_table_v2_op.cu +++ b/paddle/fluid/operators/lookup_table_v2_op.cu @@ -94,8 +94,8 @@ class LookupTableV2CUDAKernel : 
public framework::OpKernel { auto *output_t = context.Output("Out"); int64_t padding_idx = context.Attr("padding_idx"); - auto id_name = context.Inputs("Ids").front(); - auto out_name = context.Outputs("Out").front(); + auto id_name = context.InputNames("Ids").front(); + auto out_name = context.OutputNames("Out").front(); size_t N = table_t->dims()[0]; size_t D = table_t->dims()[1]; diff --git a/paddle/fluid/operators/lookup_table_v2_op.h b/paddle/fluid/operators/lookup_table_v2_op.h index 1db3bc9750d5ed974c0f17881a8bd1b2d88b936c..19838ceeae8aa788645c658bcd745f3f7325a1d8 100644 --- a/paddle/fluid/operators/lookup_table_v2_op.h +++ b/paddle/fluid/operators/lookup_table_v2_op.h @@ -45,9 +45,9 @@ class LookupTableV2Kernel : public framework::OpKernel { auto *output_t = context.Output("Out"); // float tensor auto *table_var = context.InputVar("W"); - auto id_name = context.Inputs("Ids").front(); - auto embedding_name = context.Inputs("W").front(); - auto out_name = context.Outputs("Out").front(); + auto id_name = context.InputNames("Ids").front(); + auto embedding_name = context.InputNames("W").front(); + auto out_name = context.OutputNames("Out").front(); // for remote prefetch auto epmap = context.Attr>("epmap"); diff --git a/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc index 414576f1a29d54b5a467be507ef34c76cff3b7ec..b9a0a7915dab9965e51e687983616cdf38f249b9 100644 --- a/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/activation_mkldnn_op.cc @@ -73,8 +73,8 @@ void eltwise_forward(const framework::ExecutionContext &ctx, const auto *x = ctx.Input("X"); auto *y = ctx.Output("Out"); - const T alpha = ctx.op().HasAttr("alpha") ? ctx.Attr("alpha") : 0; - const T beta = ctx.op().HasAttr("beta") ? ctx.Attr("beta") : 0; + const T alpha = ctx.HasAttr("alpha") ? ctx.Attr("alpha") : 0; + const T beta = ctx.HasAttr("beta") ? ctx.Attr("beta") : 0; PADDLE_ENFORCE( x->dims().size() == 2 || x->dims().size() == 3 || x->dims().size() == 4, @@ -88,7 +88,7 @@ void eltwise_forward(const framework::ExecutionContext &ctx, platform::ActivationMKLDNNHandler handler( src_tz, algorithm, alpha, beta, src_format, is_test, dev_ctx, - ctx.GetPlace(), ctx.op().Input("X")); + ctx.GetPlace(), ctx.InputName("X")); auto src_memory_p = handler.AcquireSrcMemory(x); auto dst_memory_p = handler.AcquireDstMemory(y); @@ -113,8 +113,8 @@ void eltwise_grad(const framework::ExecutionContext &ctx, const auto *diff_y = ctx.Input(framework::GradVarName("Out")); auto *diff_x = ctx.Output(framework::GradVarName("X")); - const T alpha = ctx.op().HasAttr("alpha") ? ctx.Attr("alpha") : 0; - const T beta = ctx.op().HasAttr("beta") ? ctx.Attr("beta") : 0; + const T alpha = ctx.HasAttr("alpha") ? ctx.Attr("alpha") : 0; + const T beta = ctx.HasAttr("beta") ? 
ctx.Attr("beta") : 0; auto diff_dst_tz = framework::vectorize(diff_y->dims()); @@ -127,7 +127,7 @@ void eltwise_grad(const framework::ExecutionContext &ctx, platform::ActivationMKLDNNHandler handler( diff_dst_tz, algorithm, alpha, beta, src_format, diff_y_format, dev_ctx, - ctx.GetPlace(), ctx.op().Input("X")); + ctx.GetPlace(), ctx.InputName("X")); auto src_memory_p = handler.AcquireBackwardSrcMemory(x); auto diff_dst_memory_p = handler.AcquireDiffDstMemory(diff_y); diff --git a/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc index 189f512d31694ecba3021319039a75049d659335..ad51de386ed30f369c8d90895db8e90b2f3f8b13 100644 --- a/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/batch_norm_mkldnn_op.cc @@ -149,6 +149,7 @@ class BatchNormMKLDNNOpKernel : public paddle::framework::OpKernel { const unsigned int C = scale_tz[0]; // MKLDNN requires a single piece of memory for scale and shift/bias data + std::vector scaleshift_data(scale->data(), scale->data() + C); scaleshift_data.reserve(2 * C); scaleshift_data.insert(scaleshift_data.end(), shift->data(), @@ -162,7 +163,7 @@ class BatchNormMKLDNNOpKernel : public paddle::framework::OpKernel { BatchNormMKLDNNHandler handler( src_tz, epsilon, flags, global_stats, platform::MKLDNNFormatForSize(src_tz.size(), x->format()), dev_ctx, - ctx.GetPlace(), ctx.op().Output("SavedMean")); + ctx.GetPlace(), ctx.OutputName("SavedMean")); auto src_memory = handler.AcquireSrcMemory(x); auto scaleshift_memory = @@ -261,7 +262,7 @@ class BatchNormMKLDNNGradOpKernel : public paddle::framework::OpKernel { BatchNormMKLDNNHandler handler( src_tz, epsilon, mkldnn::use_scale_shift, dst_format, input_format, - dev_ctx, ctx.GetPlace(), ctx.op().Input("SavedMean")); + dev_ctx, ctx.GetPlace(), ctx.InputName("SavedMean")); // MKLDNN requires a single piece of memory for scale and shift/bias data const size_t scaleshift_size = 2 * C; diff --git a/paddle/fluid/operators/mkldnn/concat_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/concat_mkldnn_op.cc index 4baf65fb7426eddfeb4bc70fde1124c31c77ed90..e51db0208ef3edc9ad48ec8dcfe7900b0a88a504 100644 --- a/paddle/fluid/operators/mkldnn/concat_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/concat_mkldnn_op.cc @@ -151,7 +151,7 @@ class ConcatMKLDNNOpKernel : public paddle::framework::OpKernel { ConcatPrimitiveFactory prim_creator; std::string key = platform::CreateKey( paddle::framework::vectorize(multi_input[0]->dims()), - ctx.op().Output("Out"), dt, platform::ThreadIDasStr()); + ctx.OutputName("Out"), dt, platform::ThreadIDasStr()); const std::string key_prim = key + "@concat_p"; const std::string key_concat_pd = key + "@concat_pd"; const std::string key_srcs = key + "@concat_srcs"; diff --git a/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc index 5bb362d475b0a9621764be201f7eba2d7b7470ba..f9ca40f870b44e61469fccb54b339cb6ce98be63 100644 --- a/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/conv_mkldnn_op.cc @@ -203,7 +203,7 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { // Get unique name for storing MKLDNN primitives const std::string key = platform::CreateKey( - src_tz, ctx.op().Input("Input") + ctx.op().Input("Filter")); + src_tz, ctx.InputName("Input") + ctx.InputName("Filter")); std::vector pipeline; @@ -377,7 +377,7 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { 
paddle::framework::ToMKLDNNDataType(input->type()); std::string key = platform::CreateKey( - src_tz, src_dt, ctx.op().Input("Input") + ctx.op().Input("Filter")); + src_tz, src_dt, ctx.InputName("Input") + ctx.InputName("Filter")); const std::string key_conv_pd = key + "@conv_pd"; @@ -755,7 +755,7 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel { // as well as attributes of primitive to be created // This name will be used as key when saving info into device context const std::string key = platform::CreateKey( - src_tz, ctx.op().Input("Input") + ctx.op().Input("Filter")); + src_tz, ctx.InputName("Input") + ctx.InputName("Filter")); const std::string key_conv_pd = key + "@conv_pd"; std::vector pipeline; diff --git a/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc index 49f4327f4f8fc6af30ffe55f27831d6832c3c0ac..74bc1eb2865ef91ddcb6d9b8f9bdbbfe19a9e514 100644 --- a/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/conv_transpose_mkldnn_op.cc @@ -140,8 +140,9 @@ class ConvTransposeMKLDNNOpKernel : public paddle::framework::OpKernel { auto dst_tz = paddle::framework::vectorize(output->dims()); // Get unique name for storing MKLDNN primitives + const std::string key = - platform::CreateKey(src_tz, ctx.op().Output("Output")); + platform::CreateKey(src_tz, ctx.OutputName("Output")); std::vector pipeline; diff --git a/paddle/fluid/operators/mkldnn/dequantize_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/dequantize_mkldnn_op.cc index a9f8ed74c3da5faf9744e86ba0e349d30eeae980..9a79feb8a58c580277aa8b3d44f7f54b9f268f9e 100644 --- a/paddle/fluid/operators/mkldnn/dequantize_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/dequantize_mkldnn_op.cc @@ -53,7 +53,7 @@ class DeQuantOpKernel : public framework::OpKernel { paddle::framework::ToMKLDNNDataType(input->type()); MKLDNNMemoryFormat src_fmt = input->format(); std::string key = - platform::CreateKey(src_dt, src_tz, ctx.op().Output("Output")); + platform::CreateKey(src_dt, src_tz, ctx.OutputName("Output")); const std::string key_prim = key + "@reorder_p"; const std::string key_src_mem = key + "@src_mem"; const std::string key_dst_mem = key + "@dst_mem"; diff --git a/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc index 458d56e40239bfea50f27cd8ebe69f1c12be83fe..dbf954213cb45b4a0b99bb6a55a515a2bc7b566c 100644 --- a/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/fc_mkldnn_op.cc @@ -404,7 +404,7 @@ GetPrimitiveFactory(const MKLDNNDeviceContext& dev_ctx, const mkldnn::engine& mkldnn_engine) { const std::string key = platform::CreateKey( platform::ThreadIDasStr(), input->format(), - framework::vectorize(weights->dims()), ctx.op().Output("Out")); + framework::vectorize(weights->dims()), ctx.OutputName("Out")); auto prim_creator = std::static_pointer_cast>( diff --git a/paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc index ef922c35b86b545f1aed6eb1a002db39d239a93b..29301e6c679a13a7fff46d3740a4999e3901e59a 100644 --- a/paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/lrn_mkldnn_op.cc @@ -53,7 +53,7 @@ class LRNMKLDNNOpKernel : public paddle::framework::OpKernel { platform::LRNMKLDNNHandler handler(dims, n, alpha, beta, k, x->format(), is_test, dev_ctx, ctx.GetPlace(), - ctx.op().Output("Out")); + ctx.OutputName("Out")); auto src_memory = handler.AcquireSrcMemory(x); auto 
dst_memory = handler.AcquireDstMemory(out); @@ -109,9 +109,9 @@ class LRNMKLDNNGradOpKernel : public paddle::framework::OpKernel { auto dims = paddle::framework::vectorize(x->dims()); - platform::LRNMKLDNNHandler handler( - dims, n, alpha, beta, k, x->format(), out_grad->format(), dev_ctx, - ctx.GetPlace(), ctx.op().Input("Out")); + platform::LRNMKLDNNHandler handler(dims, n, alpha, beta, k, x->format(), + out_grad->format(), dev_ctx, + ctx.GetPlace(), ctx.InputName("Out")); auto src_memory = handler.AcquireSrcMemory(x); auto workspace = handler.AcquireBackwardWorkspaceMemory(mid); diff --git a/paddle/fluid/operators/mkldnn/mkldnn_activation_op.h b/paddle/fluid/operators/mkldnn/mkldnn_activation_op.h index 85664623d7330e9473286d995bec67879510dbd7..aa34a092bd6bbaa3b0700b1cab11a69b2f2beff4 100644 --- a/paddle/fluid/operators/mkldnn/mkldnn_activation_op.h +++ b/paddle/fluid/operators/mkldnn/mkldnn_activation_op.h @@ -33,10 +33,10 @@ class MKLDNNActivationKernel void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE(context.Input("X") != nullptr, "Cannot get input tensor X, variable name = %s", - context.op().Input("X")); + context.InputName("X")); PADDLE_ENFORCE(context.Output("Out") != nullptr, "Cannot find output tensor Out, variable name = %s", - context.op().Output("Out")); + context.OutputName("Out")); Functor functor; auto attrs = functor.GetAttrs(); diff --git a/paddle/fluid/operators/mkldnn/mul_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/mul_mkldnn_op.cc index 7d41960214d42b7592e866e8d31cb5aa0271f90f..b9547e24a1672d1ba33977696a439d5879f8e56d 100644 --- a/paddle/fluid/operators/mkldnn/mul_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/mul_mkldnn_op.cc @@ -342,7 +342,7 @@ std::shared_ptr> GetPrimitiveFactory( const std::string key = platform::CreateKey( input_x->type(), framework::vectorize(input_x->dims()), input_y->type(), framework::vectorize(input_y->dims()), - ctx.op().Output("Out")); + ctx.OutputName("Out")); auto prim_creator = std::static_pointer_cast>( dev_ctx.GetBlob(key)); diff --git a/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc index c578e69821ff9e491461ab158507b4064d2f568d..866a4319dcffd57fb7bba2f803788fd134317ef3 100644 --- a/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/pool_mkldnn_op.cc @@ -80,7 +80,7 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel { src_tz, dst_tz, ksize, strides, paddings, pooling_type, ctx.Attr("ceil_mode"), input->format(), paddle::framework::ToMKLDNNDataType(input->type()), is_test, dev_ctx, - ctx.GetPlace(), ctx.op().Output("Out"), ctx.Attr("exclusive")); + ctx.GetPlace(), ctx.OutputName("Out"), ctx.Attr("exclusive")); auto src_memory = handler.AcquireSrcMemory(input); auto dst_memory = handler.AcquireDstMemory(output); @@ -162,13 +162,13 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel { // This name will be used as key when referring info from device context const std::string key = platform::CreateKey( diff_src_tz, pooling_type, ksize, strides, paddings, - memory::data_type::f32, in_x->format(), ctx.op().Input("Out")); + memory::data_type::f32, in_x->format(), ctx.InputName("Out")); platform::PoolingMKLDNNHandler handler( diff_dst_tz, diff_src_tz, ksize, strides, paddings, pooling_type, ctx.Attr("ceil_mode"), in_x->format(), out_grad->format(), paddle::framework::ToMKLDNNDataType(out_grad->type()), dev_ctx, - ctx.GetPlace(), ctx.op().Input("Out"), ctx.Attr("exclusive")); + 
ctx.GetPlace(), ctx.InputName("Out"), ctx.Attr("exclusive")); auto diff_dst_memory = handler.AcquireDiffDstMemory(out_grad); auto diff_src_memory = handler.AcquireDiffSrcMemory(in_x_grad); diff --git a/paddle/fluid/operators/mkldnn/quantize_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/quantize_mkldnn_op.cc index 788e3f279318f34f85e9b1f006595beb24bdd88e..f4c68181b6c3bec014e9889ac64b4b1f58d49bea 100644 --- a/paddle/fluid/operators/mkldnn/quantize_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/quantize_mkldnn_op.cc @@ -49,7 +49,7 @@ class QuantOpKernel : public framework::OpKernel { bool is_negative = ctx.Attr("is_negative_input"); std::string key = platform::CreateKey(src_tz, scale_data, is_negative, - ctx.op().Output("Output")); + ctx.OutputName("Output")); const std::string key_prim = key + "@reorder_p"; const std::string key_src_mem = key + "@src_mem"; const std::string key_dst_mem = key + "@dst_mem"; diff --git a/paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc index f6b0a9ac528e57d2f131fa2e974ff1d0dbb620a0..08ead5f0c615fdaacdee52313dd5565f000c19dd 100644 --- a/paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/softmax_mkldnn_op.cc @@ -90,7 +90,7 @@ class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel { auto softmax_tz = paddle::framework::vectorize(dims); SoftmaxMKLDNNHandler handler(softmax_tz, input->format(), axis, dev_ctx, - ctx.GetPlace(), ctx.op().Output("Out")); + ctx.GetPlace(), ctx.OutputName("Out")); auto softmax_src_memory_p = handler.AcquireSrcMemory(input); auto softmax_dst_memory_p = handler.AcquireDstMemory(output); @@ -140,7 +140,7 @@ class SoftmaxMKLDNNGradKernel : public paddle::framework::OpKernel { SoftmaxMKLDNNHandler handler(softmax_tz, output->format(), dout->format(), axis, dev_ctx, - ctx.GetPlace(), ctx.op().Input("Out")); + ctx.GetPlace(), ctx.InputName("Out")); auto dst_memory_p = handler.AcquireDstMemory(output); auto diff_dst_memory_p = handler.AcquireDiffDstMemory(dout); diff --git a/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc b/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc index 6c4206f910383a9c39684883440501035adba871..065707cac28ab86d1ddcc21b58771cc21c01b088 100644 --- a/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc +++ b/paddle/fluid/operators/mkldnn/transpose_mkldnn_op.cc @@ -46,8 +46,7 @@ class TransposeMKLDNNOpKernel : public paddle::framework::OpKernel { auto nchw_tz = paddle::framework::vectorize(input->dims()); - const std::string key = - platform::CreateKey(nchw_tz, ctx.op().Output("Out")); + const std::string key = platform::CreateKey(nchw_tz, ctx.OutputName("Out")); platform::TransposeMKLDNNHandler handler(nchw_tz, axis, dev_ctx, mkldnn_engine, key); @@ -99,7 +98,7 @@ class TransposeMKLDNNGradOpKernel : public paddle::framework::OpKernel { auto nchw_tz = paddle::framework::vectorize(out_grad->dims()); const std::string key = platform::CreateKey( - nchw_tz, ctx.op().Output(framework::GradVarName("X"))); + nchw_tz, ctx.OutputName(framework::GradVarName("X"))); platform::TransposeMKLDNNHandler handler(nchw_tz, reversed_axis, dev_ctx, mkldnn_engine, key); diff --git a/paddle/fluid/operators/multiplex_op.cc b/paddle/fluid/operators/multiplex_op.cc index 961a80398a252b5b0dcce9e09ee240044780ef1f..5ff6a39e43d9cfc1e14f76edc7fe304b316c4445 100644 --- a/paddle/fluid/operators/multiplex_op.cc +++ b/paddle/fluid/operators/multiplex_op.cc @@ -114,7 +114,7 @@ class MultiplexGradOp : public framework::OperatorWithKernel { using 
framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - auto& dxs = ctx->Outputs(framework::GradVarName("X")); + auto dxs = ctx->Outputs(framework::GradVarName("X")); PADDLE_ENFORCE(!dxs.empty(), "Output(X@Grad) should not be null."); PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), "Input(Out@GRAD) should not be null."); diff --git a/paddle/fluid/operators/nce_op.h b/paddle/fluid/operators/nce_op.h index f66d046402cbacea690fe96c2877aec9371b6bd6..f6f00c1583af439fb2bbbb43c4dd34c05325f531 100644 --- a/paddle/fluid/operators/nce_op.h +++ b/paddle/fluid/operators/nce_op.h @@ -217,7 +217,7 @@ class NCEKernel : public framework::OpKernel { w_tensor->Resize(framework::make_ddim(w_dims)); #ifdef PADDLE_WITH_DISTRIBUTE - auto weight = context.Inputs("Weight").front(); + auto weight = context.InputNames("Weight").front(); operators::distributed::prefetch("Ids@Prefetch", "Weight@Prefetch", weight, false, table_names, epmap, height_sections, context, local_scope); diff --git a/paddle/fluid/operators/ngraph/ngraph_engine.cc b/paddle/fluid/operators/ngraph/ngraph_engine.cc index 3c53c87c6ff4795c28be9eedc2f3e870e0a20916..b37ccf3f0d9b9c4b0812df349d7aeb31a1a75257 100644 --- a/paddle/fluid/operators/ngraph/ngraph_engine.cc +++ b/paddle/fluid/operators/ngraph/ngraph_engine.cc @@ -305,13 +305,13 @@ void NgraphEngine::Prepare(const framework::ExecutionContext& ctx) { ++idx; } - auto input_vars = ctx.Inputs("Xs"); + auto input_vars = ctx.InputNames("Xs"); if (!input_vars.empty()) { feed_vars = input_vars; var_in_ = input_vars; } - auto output_vars = ctx.Outputs("Ys"); + auto output_vars = ctx.OutputNames("Ys"); if (!output_vars.empty()) { var_out_ = output_vars; } diff --git a/paddle/fluid/operators/optimizers/adadelta_op.h b/paddle/fluid/operators/optimizers/adadelta_op.h index 3f51bb0b3d6ddf41a08a64f254f76c88b60ced22..e66dec7cf0ff686f91103e438b6374fce29af774 100644 --- a/paddle/fluid/operators/optimizers/adadelta_op.h +++ b/paddle/fluid/operators/optimizers/adadelta_op.h @@ -27,13 +27,13 @@ class AdadeltaOpKernel : public framework::OpKernel { PADDLE_ENFORCE(param_var->IsType(), "The Var(%s)'s type should be LoDTensor, " "but the received is %s", - ctx.Inputs("Param").front(), + ctx.InputNames("Param").front(), framework::ToTypeName(param_var->Type())); const auto* grad_var = ctx.InputVar("Grad"); PADDLE_ENFORCE(grad_var->IsType(), "The Var(%s)'s type should be LoDTensor, " "but the received is %s", - ctx.Inputs("Grad").front(), + ctx.InputNames("Grad").front(), framework::ToTypeName(grad_var->Type())); auto param_out_tensor = ctx.Output("ParamOut"); diff --git a/paddle/fluid/operators/optimizers/adagrad_op.h b/paddle/fluid/operators/optimizers/adagrad_op.h index 13455fc42cdc72a8ebfcac3dc0c94b79497d91f6..5d80102257656acc29a25d3973c6e10b4f51e48f 100644 --- a/paddle/fluid/operators/optimizers/adagrad_op.h +++ b/paddle/fluid/operators/optimizers/adagrad_op.h @@ -50,7 +50,7 @@ class AdagradOpKernel : public framework::OpKernel { PADDLE_ENFORCE(param_var->IsType(), "The Var(%s)'s type should be LoDTensor, " "but the received is %s", - ctx.Inputs("Param").front(), + ctx.InputNames("Param").front(), framework::ToTypeName(param_var->Type())); auto *param_out_tensor = ctx.Output("ParamOut"); diff --git a/paddle/fluid/operators/optimizers/adam_op.h b/paddle/fluid/operators/optimizers/adam_op.h index 95e4d22b06fb766ca8aa64307d1890d84f1ae3f0..96c12ad1364b14cf39ae486ad586cd907670798e 100644 --- a/paddle/fluid/operators/optimizers/adam_op.h +++ 
b/paddle/fluid/operators/optimizers/adam_op.h @@ -368,7 +368,7 @@ class AdamOpKernel : public framework::OpKernel { PADDLE_ENFORCE(param_var->IsType(), "The Var(%s)'s type should be LoDTensor, " "but the received is %s", - ctx.Inputs("Param").front(), + ctx.InputNames("Param").front(), framework::ToTypeName(param_var->Type())); using paddle::framework::LoDTensor; diff --git a/paddle/fluid/operators/optimizers/adamax_op.h b/paddle/fluid/operators/optimizers/adamax_op.h index 55d25ecbddf175c0c9ba2c68ef2f6c7b83dcf32e..b1c869532810e69bb527d8fabdd900a2a6642ab1 100644 --- a/paddle/fluid/operators/optimizers/adamax_op.h +++ b/paddle/fluid/operators/optimizers/adamax_op.h @@ -27,13 +27,13 @@ class AdamaxOpKernel : public framework::OpKernel { PADDLE_ENFORCE(param_var->IsType(), "The Var(%s)'s type should be LoDTensor, " "but the received is %s", - ctx.Inputs("Param").front(), + ctx.InputNames("Param").front(), framework::ToTypeName(param_var->Type())); const auto* grad_var = ctx.InputVar("Grad"); PADDLE_ENFORCE(grad_var->IsType(), "The Var(%s)'s type should be LoDTensor, " "but the received is %s", - ctx.Inputs("Grad").front(), + ctx.InputNames("Grad").front(), framework::ToTypeName(grad_var->Type())); auto param_out_tensor = ctx.Output("ParamOut"); diff --git a/paddle/fluid/operators/optimizers/decayed_adagrad_op.h b/paddle/fluid/operators/optimizers/decayed_adagrad_op.h index 4abd436927707f1a18039c9104a92b2a0bf3c982..279edfb015c26848d4078975a40bdca650bdc6a0 100644 --- a/paddle/fluid/operators/optimizers/decayed_adagrad_op.h +++ b/paddle/fluid/operators/optimizers/decayed_adagrad_op.h @@ -27,13 +27,13 @@ class DecayedAdagradOpKernel : public framework::OpKernel { PADDLE_ENFORCE(param_var->IsType(), "The Var(%s)'s type should be LoDTensor, " "but the received is %s", - ctx.Inputs("Param").front(), + ctx.InputNames("Param").front(), framework::ToTypeName(param_var->Type())); const auto* grad_var = ctx.InputVar("Grad"); PADDLE_ENFORCE(grad_var->IsType(), "The Var(%s)'s type should be LoDTensor, " "but the received is %s", - ctx.Inputs("Grad").front(), + ctx.InputNames("Grad").front(), framework::ToTypeName(grad_var->Type())); auto param_out_tensor = ctx.Output("ParamOut"); diff --git a/paddle/fluid/operators/optimizers/dpsgd_op.h b/paddle/fluid/operators/optimizers/dpsgd_op.h index 4eba7fed7e98cdb2065ed8245eca898388f23d0f..171691613bb872b456eac4d33cc96529b564a854 100644 --- a/paddle/fluid/operators/optimizers/dpsgd_op.h +++ b/paddle/fluid/operators/optimizers/dpsgd_op.h @@ -30,14 +30,14 @@ class DpsgdOpKernel : public framework::OpKernel { PADDLE_ENFORCE_EQ(param_var->IsType(), true, "The Var(%s)'s type should be LoDTensor, " "but the received is %s", - ctx.Inputs("Param").front(), + ctx.InputNames("Param").front(), framework::ToTypeName(param_var->Type())); const auto *grad_var = ctx.InputVar("Grad"); PADDLE_ENFORCE_EQ(grad_var->IsType(), true, "The Var(%s)'s type should be LoDTensor, " "but the received is %s", - ctx.Inputs("Grad").front(), + ctx.InputNames("Grad").front(), framework::ToTypeName(grad_var->Type())); const auto *learning_rate = ctx.Input("LearningRate"); diff --git a/paddle/fluid/operators/optimizers/ftrl_op.h b/paddle/fluid/operators/optimizers/ftrl_op.h index bbf34d8316b09a78c334b0d79b132639be8af4f7..53799d99a9f03c2679b9c4c7dce99ab56d92d23a 100644 --- a/paddle/fluid/operators/optimizers/ftrl_op.h +++ b/paddle/fluid/operators/optimizers/ftrl_op.h @@ -32,13 +32,13 @@ class FTRLOpKernel : public framework::OpKernel { PADDLE_ENFORCE(param_var->IsType(), "The Var(%s)'s type should be 
LoDTensor, " "but the received is %s", - ctx.Inputs("Param").front(), + ctx.InputNames("Param").front(), framework::ToTypeName(param_var->Type())); const auto* grad_var = ctx.InputVar("Grad"); PADDLE_ENFORCE(grad_var->IsType(), "The Var(%s)'s type should be LoDTensor, " "but the received is %s", - ctx.Inputs("Grad").front(), + ctx.InputNames("Grad").front(), framework::ToTypeName(grad_var->Type())); auto* param_out = ctx.Output("ParamOut"); diff --git a/paddle/fluid/operators/optimizers/lamb_op.h b/paddle/fluid/operators/optimizers/lamb_op.h index 082235599015dfef272e5a830e41e225ecc13a66..e6d518a4f731f806d7a4271d58d24ae1dcca11c3 100644 --- a/paddle/fluid/operators/optimizers/lamb_op.h +++ b/paddle/fluid/operators/optimizers/lamb_op.h @@ -181,7 +181,7 @@ class LambOpKernel : public framework::OpKernel { PADDLE_ENFORCE(param_var->IsType(), "The Var(%s)'s type should be LoDTensor, " "but the received is %s", - ctx.Inputs("Param").front(), + ctx.InputNames("Param").front(), framework::ToTypeName(param_var->Type())); using paddle::framework::LoDTensor; diff --git a/paddle/fluid/operators/optimizers/sgd_op.cu b/paddle/fluid/operators/optimizers/sgd_op.cu index ebe7814aa7e2ce25ff955e5d3b8a6fe0c11f897f..96eb51903f015478e02e7bd8d9dd8cfcc5d93ee2 100644 --- a/paddle/fluid/operators/optimizers/sgd_op.cu +++ b/paddle/fluid/operators/optimizers/sgd_op.cu @@ -61,7 +61,7 @@ class SGDOpKernel PADDLE_ENFORCE(param_var->IsType(), "The Var(%s)'s type should be LoDTensor, " "but the received is %s", - ctx.Inputs("Param").front(), + ctx.InputNames("Param").front(), framework::ToTypeName(param_var->Type())); auto* param = ctx.Input("Param"); diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc index 9c4cb63537f7e7aaee52ced013aceb55a3c14863..3772ad319ea0934c1b14cea81e53b163914118aa 100644 --- a/paddle/fluid/operators/reshape_op.cc +++ b/paddle/fluid/operators/reshape_op.cc @@ -431,7 +431,9 @@ class Reshape2GradMaker : public framework::SingleGradOpMaker { auto *grad_op = new T(); grad_op->SetType("reshape2_grad"); grad_op->SetInput("XShape", this->Output("XShape")); - grad_op->SetInput("ShapeTensor", this->Input("ShapeTensor")); + if (this->HasInput("ShapeTensor")) { + grad_op->SetInput("ShapeTensor", this->Input("ShapeTensor")); + } grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out")); grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X")); grad_op->SetAttrMap(this->Attrs()); diff --git a/paddle/fluid/operators/save_combine_op.h b/paddle/fluid/operators/save_combine_op.h index 4ee82e17dd5e8173ce7dfb5c248890912d2cc7ef..66648fffd4bd7929fb5655207b424f5b9f79253b 100644 --- a/paddle/fluid/operators/save_combine_op.h +++ b/paddle/fluid/operators/save_combine_op.h @@ -50,7 +50,7 @@ class SaveCombineOpKernel : public framework::OpKernel { PADDLE_ENFORCE(static_cast(fout), "Cannot open %s to write", filename); - auto &inp_var_names = ctx.Inputs("X"); + auto inp_var_names = ctx.InputNames("X"); auto &inp_vars = ctx.MultiInputVar("X"); PADDLE_ENFORCE_GT(static_cast(inp_var_names.size()), 0, "The number of input variables should be greater than 0"); diff --git a/paddle/fluid/operators/save_op.h b/paddle/fluid/operators/save_op.h index b59421cb9e08e343a507210316be0d9b06192c49..ebb2099f0cbb4edf6611caf786514d1541e09e24 100644 --- a/paddle/fluid/operators/save_op.h +++ b/paddle/fluid/operators/save_op.h @@ -40,7 +40,7 @@ class SaveOpKernel : public framework::OpKernel { auto place = ctx.GetPlace(); auto *input_var = ctx.InputVar("X"); - auto iname = 
ctx.Inputs("X").data(); + auto iname = ctx.InputNames("X").data(); PADDLE_ENFORCE(input_var != nullptr, "Cannot find variable %s for save_op", iname); diff --git a/paddle/fluid/operators/slice_op.cc b/paddle/fluid/operators/slice_op.cc index 4caa07dff87a6e32723f86efdd4e706b70b152b8..9d96f9670c633f1937f4fff4065bd219cea930df 100644 --- a/paddle/fluid/operators/slice_op.cc +++ b/paddle/fluid/operators/slice_op.cc @@ -271,10 +271,18 @@ class SliceOpGradMaker : public framework::SingleGradOpMaker { std::unique_ptr Apply() const override { auto *bind = new T(); bind->SetInput("Input", this->Input("Input")); - bind->SetInput("StartsTensor", this->Input("StartsTensor")); - bind->SetInput("EndsTensor", this->Input("EndsTensor")); - bind->SetInput("StartsTensorList", this->Input("StartsTensorList")); - bind->SetInput("EndsTensorList", this->Input("EndsTensorList")); + if (this->HasInput("StartsTensor")) { + bind->SetInput("StartsTensor", this->Input("StartsTensor")); + } + if (this->HasInput("EndsTensor")) { + bind->SetInput("EndsTensor", this->Input("EndsTensor")); + } + if (this->HasInput("StartsTensorList")) { + bind->SetInput("StartsTensorList", this->Input("StartsTensorList")); + } + if (this->HasInput("EndsTensorList")) { + bind->SetInput("EndsTensorList", this->Input("EndsTensorList")); + } bind->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out")); bind->SetOutput(framework::GradVarName("Input"), this->InputGrad("Input")); bind->SetAttrMap(this->Attrs()); diff --git a/paddle/fluid/operators/split_op.h b/paddle/fluid/operators/split_op.h index 3bad392e1ba0a6fd8b87071c5d055e58997b5847..9788a578ed04d41fa34be48315213bd3276f0823 100644 --- a/paddle/fluid/operators/split_op.h +++ b/paddle/fluid/operators/split_op.h @@ -163,7 +163,9 @@ class SplitGradMaker : public framework::SingleGradOpMaker { auto op = new T(); op->SetType("concat"); op->SetInput("X", this->OutputGrad("Out")); - op->SetInput("AxisTensor", this->Input("AxisTensor")); + if (this->HasInput("AxisTensor")) { + op->SetInput("AxisTensor", this->Input("AxisTensor")); + } op->SetOutput("Out", this->InputGrad("X")); op->SetAttrMap(this->Attrs()); return std::unique_ptr(op); diff --git a/paddle/fluid/operators/sum_op.cc b/paddle/fluid/operators/sum_op.cc index 33eef7b075cbe4e48a1bca5e8c3905cba59e5d1d..31a8d3f430b4a5429ecf9505ea2a6ab622f46942 100644 --- a/paddle/fluid/operators/sum_op.cc +++ b/paddle/fluid/operators/sum_op.cc @@ -108,7 +108,7 @@ class SumOp : public framework::OperatorWithKernel { framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { auto x_vars = ctx.MultiInputVar("X"); - auto x_vars_name = ctx.Inputs("X"); + auto x_vars_name = ctx.InputNames("X"); framework::LibraryType library{framework::LibraryType::kPlain}; framework::DataLayout layout{framework::DataLayout::kAnyLayout}; diff --git a/python/paddle/fluid/tests/unittests/test_add_position_encoding_op.py b/python/paddle/fluid/tests/unittests/test_add_position_encoding_op.py index 3f2a33793028f0883ffe94dd8a32626ad5c0351c..f869b96c453236e852e3a5b5fce7c41bdf418be6 100644 --- a/python/paddle/fluid/tests/unittests/test_add_position_encoding_op.py +++ b/python/paddle/fluid/tests/unittests/test_add_position_encoding_op.py @@ -39,13 +39,14 @@ class TestAddPositionEncodingTensorOp(OpTest): """ check the correctness of output """ - self.check_output() + self.check_output(check_dygraph=False) def test_check_grad(self): """ check the correctness of grad """ - self.check_grad(['X'], 'Out', max_relative_error=0.005) + 
self.check_grad( + ['X'], 'Out', max_relative_error=0.005, check_dygraph=False) def init_input_output(self): """ @@ -93,13 +94,14 @@ class TestAddPositionEncodingLoDTensorOp(OpTest): """ check the correctness of output """ - self.check_output() + self.check_output(check_dygraph=False) def test_check_grad(self): """ check the correctness of grad """ - self.check_grad(['X'], 'Out', max_relative_error=0.005) + self.check_grad( + ['X'], 'Out', max_relative_error=0.005, check_dygraph=False) def init_input_output(self): """ diff --git a/python/paddle/fluid/tests/unittests/test_attention_lstm_op.py b/python/paddle/fluid/tests/unittests/test_attention_lstm_op.py index 1b9c3efe0fa9e9f1b8ad09029079898622e7d489..a5fb80b09702b37434024f523378dfc49490cf54 100644 --- a/python/paddle/fluid/tests/unittests/test_attention_lstm_op.py +++ b/python/paddle/fluid/tests/unittests/test_attention_lstm_op.py @@ -152,7 +152,7 @@ class TestAttentionLSTMOp(OpTest): } def test_check_output(self): - self.check_output() + self.check_output(check_dygraph=False) class TestAttentionOpNonInit(TestAttentionLSTMOp): diff --git a/python/paddle/fluid/tests/unittests/test_collect_fpn_proposals_op.py b/python/paddle/fluid/tests/unittests/test_collect_fpn_proposals_op.py index 5b8600f004bb2d4b057bd93415ba29b989d858ce..034bb7f8dc7e00a321b6c6a5a4776fa4f7398ab5 100644 --- a/python/paddle/fluid/tests/unittests/test_collect_fpn_proposals_op.py +++ b/python/paddle/fluid/tests/unittests/test_collect_fpn_proposals_op.py @@ -93,7 +93,7 @@ class TestCollectFPNProposalstOp(OpTest): self.set_data() def test_check_output(self): - self.check_output() + self.check_output(check_dygraph=False) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_cumsum_op.py b/python/paddle/fluid/tests/unittests/test_cumsum_op.py index 13a4eacece8a211513d6537db0d09b80c238178e..89da6ead386074c7476bec86cd8a850b4e678691 100644 --- a/python/paddle/fluid/tests/unittests/test_cumsum_op.py +++ b/python/paddle/fluid/tests/unittests/test_cumsum_op.py @@ -27,10 +27,10 @@ class TestSumOp1(OpTest): self.outputs = {'Out': self.inputs['X'].cumsum(axis=2)} def test_check_output(self): - self.check_output() + self.check_output(check_dygraph=False) def test_check_grad(self): - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_dygraph=False) class TestSumOp2(OpTest): @@ -45,10 +45,10 @@ class TestSumOp2(OpTest): } def test_check_output(self): - self.check_output() + self.check_output(check_dygraph=False) def test_check_grad(self): - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_dygraph=False) class TestSumOp3(OpTest): @@ -59,10 +59,10 @@ class TestSumOp3(OpTest): self.outputs = {'Out': self.inputs['X'].cumsum(axis=1)} def test_check_output(self): - self.check_output() + self.check_output(check_dygraph=False) def test_check_grad(self): - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_dygraph=False) class TestSumOp4(OpTest): @@ -73,10 +73,10 @@ class TestSumOp4(OpTest): self.outputs = {'Out': self.inputs['X'].cumsum(axis=0)} def test_check_output(self): - self.check_output() + self.check_output(check_dygraph=False) def test_check_grad(self): - self.check_grad(['X'], 'Out') + self.check_grad(['X'], 'Out', check_dygraph=False) class TestSumOp5(OpTest): @@ -86,10 +86,10 @@ class TestSumOp5(OpTest): self.outputs = {'Out': self.inputs['X'].cumsum(axis=1)} def test_check_output(self): - self.check_output() + self.check_output(check_dygraph=False) def test_check_grad(self): - self.check_grad(['X'], 'Out') 
+        self.check_grad(['X'], 'Out', check_dygraph=False)
 
 
 class TestSumOp7(OpTest):
@@ -99,10 +99,10 @@ class TestSumOp7(OpTest):
         self.outputs = {'Out': self.inputs['X'].cumsum(axis=0)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_dygraph=False)
 
 
 class TestSumOp8(OpTest):
@@ -119,10 +119,10 @@ class TestSumOp8(OpTest):
         }
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_dygraph=False)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/test_cvm_op.py b/python/paddle/fluid/tests/unittests/test_cvm_op.py
index 6752307af126318137776218e97dc2ef7140b8af..276d00bb2bfcfa032894602540b8ff2c3d8e1095 100644
--- a/python/paddle/fluid/tests/unittests/test_cvm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_cvm_op.py
@@ -77,7 +77,7 @@ class TestCVMOpWithLodTensor(OpTest):
         self.outputs = {'Y': (np.array(out), lod)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
     def test_check_grad(self):
         user_grads = np.array(
@@ -85,7 +85,8 @@ class TestCVMOpWithLodTensor(OpTest):
             (self.batch_size, self.item_width)).astype("float32")
         user_grads[:, :2] = self.inputs['CVM'].reshape(self.batch_size, 2)
         user_grads = [user_grads]
-        self.check_grad(['X'], 'Y', user_defined_grads=user_grads)
+        self.check_grad(
+            ['X'], 'Y', user_defined_grads=user_grads, check_dygraph=False)
 
 
 class TestCVMOpWithOutLodTensor1(OpTest):
@@ -111,7 +112,7 @@ class TestCVMOpWithOutLodTensor1(OpTest):
         self.outputs = {'Y': output}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
     def test_check_grad(self):
         numel = self.batch_size * self.item_width
@@ -119,7 +120,8 @@ class TestCVMOpWithOutLodTensor1(OpTest):
             (self.batch_size, self.item_width)).astype("float32")
         user_grads[:, :2] = self.inputs['CVM'].reshape(self.batch_size, 2)
         user_grads = [user_grads]
-        self.check_grad(['X'], 'Y', user_defined_grads=user_grads)
+        self.check_grad(
+            ['X'], 'Y', user_defined_grads=user_grads, check_dygraph=False)
 
 
 class TestCVMOpWithOutLodTensor2(OpTest):
@@ -145,7 +147,7 @@ class TestCVMOpWithOutLodTensor2(OpTest):
         self.outputs = {'Y': output}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
     def test_check_grad(self):
         numel = self.batch_size * self.item_width
@@ -154,7 +156,8 @@ class TestCVMOpWithOutLodTensor2(OpTest):
             (self.batch_size, self.item_width)).astype("float32")
         user_grads[:, :2] = self.inputs['CVM'].reshape(self.batch_size, 2)
         user_grads = [user_grads]
-        self.check_grad(['X'], 'Y', user_defined_grads=user_grads)
+        self.check_grad(
+            ['X'], 'Y', user_defined_grads=user_grads, check_dygraph=False)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/test_fill_op.py b/python/paddle/fluid/tests/unittests/test_fill_op.py
index 0dd1b0d869ae7f21f1d64c374010e2175e70ee33..96af96f6c7c4477ffa5afd822e629bfee5803d2e 100644
--- a/python/paddle/fluid/tests/unittests/test_fill_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fill_op.py
@@ -35,7 +35,7 @@ class TestFillOp1(OpTest):
         self.outputs = {'Out': val.astype('float64')}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
 
 class TestFillOp2(OpTest):
@@ -52,7 +52,7 @@ class TestFillOp2(OpTest):
         self.outputs = {'Out': val.astype('float64')}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
 
 class TestFillOp3(OpTest):
diff --git a/python/paddle/fluid/tests/unittests/test_fused_embedding_fc_lstm_op.py b/python/paddle/fluid/tests/unittests/test_fused_embedding_fc_lstm_op.py
index 70ca521d3387ac11cd41d8496b4d094667232d4c..7988c66c1724042bb748e3785bc6dc7f17a759f3 100644
--- a/python/paddle/fluid/tests/unittests/test_fused_embedding_fc_lstm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fused_embedding_fc_lstm_op.py
@@ -140,7 +140,7 @@ class TestFusionLSTMOp(OpTest):
     def test_check_output(self):
         for use_seq in {True, False}:
             self.attrs['use_seq'] = use_seq
-            self.check_output()
+            self.check_output(check_dygraph=False)
 
 
 class TestFusionLSTMOpInit(TestFusionLSTMOp):
diff --git a/python/paddle/fluid/tests/unittests/test_fusion_gru_op.py b/python/paddle/fluid/tests/unittests/test_fusion_gru_op.py
index 377454e7802e40f90c371987adfe50cce922c764..fb7454542587323a8775b066646bb1cd1c79c9ec 100644
--- a/python/paddle/fluid/tests/unittests/test_fusion_gru_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fusion_gru_op.py
@@ -95,7 +95,7 @@ class TestFusionGRUOp(OpTest):
     def test_check_output(self):
         for use_seq in {True, False}:
             self.attrs['use_seq'] = use_seq
-            self.check_output()
+            self.check_output(check_dygraph=False)
 
 
 class TestFusionGRUOpNoInitial(TestFusionGRUOp):
diff --git a/python/paddle/fluid/tests/unittests/test_fusion_lstm_op.py b/python/paddle/fluid/tests/unittests/test_fusion_lstm_op.py
index de0c86f96db958eebd7e74346bec244f0c804ed9..e829797ddbbdbb77d6b23e78cdbbb3816b8cce92 100644
--- a/python/paddle/fluid/tests/unittests/test_fusion_lstm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fusion_lstm_op.py
@@ -116,7 +116,7 @@ class TestFusionLSTMOp(OpTest):
     def test_check_output(self):
         for use_seq in {True, False}:
             self.attrs['use_seq'] = use_seq
-            self.check_output()
+            self.check_output(check_dygraph=False)
 
 
 class TestFusionLSTMOpInit(TestFusionLSTMOp):
diff --git a/python/paddle/fluid/tests/unittests/test_fusion_seqexpand_concat_fc_op.py b/python/paddle/fluid/tests/unittests/test_fusion_seqexpand_concat_fc_op.py
index aeee3a9999a94b4979fc3793150101352e50be85..702545d2ee4ca8f3c8f41272cd0bb59eabdf5a8c 100644
--- a/python/paddle/fluid/tests/unittests/test_fusion_seqexpand_concat_fc_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fusion_seqexpand_concat_fc_op.py
@@ -92,7 +92,7 @@ class TestFusionSeqExpandConcatFCOp(OpTest):
         self.attrs = {'fc_activation': self.fc_act}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
 
 class TestFusionSECFCOpNonBias(TestFusionSeqExpandConcatFCOp):
diff --git a/python/paddle/fluid/tests/unittests/test_gru_op.py b/python/paddle/fluid/tests/unittests/test_gru_op.py
index 17af1d88d086f9a53ef8075572184a4cd4d3be88..bce459461a48335ed6764eb0b653a670bcde4171 100644
--- a/python/paddle/fluid/tests/unittests/test_gru_op.py
+++ b/python/paddle/fluid/tests/unittests/test_gru_op.py
@@ -155,10 +155,11 @@ class TestGRUOp(OpTest):
         }
 
     def test_check_output(self):
-        self.check_output(atol=1e-8, check_dygraph=True)
+        self.check_output(atol=1e-8, check_dygraph=False)
 
     def test_check_grad(self):
-        self.check_grad(['Input', 'H0', 'Weight', 'Bias'], ['Hidden'])
+        self.check_grad(
+            ['Input', 'H0', 'Weight', 'Bias'], ['Hidden'], check_dygraph=False)
 
 
 class TestGRUOriginMode(TestGRUOp):
@@ -207,7 +208,8 @@ class TestGRUOpNoInitial(TestGRUOp):
         self.with_h0 = False
 
     def test_check_grad(self):
-        self.check_grad(['Input', 'Weight', 'Bias'], ['Hidden'])
+        self.check_grad(
+            ['Input', 'Weight', 'Bias'], ['Hidden'], check_dygraph=False)
 
 
 class TestGRUOpNoBias(TestGRUOp):
@@ -215,7 +217,8 @@ class TestGRUOpNoBias(TestGRUOp):
         self.with_bias = False
 
     def test_check_grad(self):
-        self.check_grad(['Input', 'H0', 'Weight'], ['Hidden'])
+        self.check_grad(
+            ['Input', 'H0', 'Weight'], ['Hidden'], check_dygraph=False)
 
 
 class TestGRUOpReverse(TestGRUOp):
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py
index 6424f15920f714a424ddea1aa79265247babed86..9dcea95aa97660673214dc37f77a7c3c4d8fe65a 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py
@@ -221,7 +221,6 @@ class TestDygraphPtbRnn(unittest.TestCase):
         init_scale = 0.1
         batch_size = 4
         batch_num = 200
-        traced_layer = None
 
         with fluid.dygraph.guard():
diff --git a/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py b/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py
index 7c1808cf998e84c22c46df68ef07259c1a021c19..12d99b9cfc830b4a62a7bc4b7f3f4a10baeaff95 100644
--- a/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py
+++ b/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py
@@ -24,7 +24,7 @@ from op_test import OpTest
 
 class TestIOUSimilarityOp(OpTest):
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
     def setUp(self):
         self.op_type = "iou_similarity"
@@ -56,7 +56,7 @@ class TestIOUSimilarityOpWithLoD(TestIOUSimilarityOp):
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
     def setUp(self):
         super(TestIOUSimilarityOpWithLoD, self).setUp()
diff --git a/python/paddle/fluid/tests/unittests/test_lstm_op.py b/python/paddle/fluid/tests/unittests/test_lstm_op.py
index 7ee33c6e9ec1995f6b365e556c7adce20eb16270..2b3d9be8200bdde4d5118ba5b7e45b49d02419a0 100644
--- a/python/paddle/fluid/tests/unittests/test_lstm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lstm_op.py
@@ -188,7 +188,7 @@ class TestLstmOp(OpTest):
         }
 
     def test_check_output(self):
-        self.check_output(atol=1e-8)
+        self.check_output(atol=1e-8, check_dygraph=False)
 
     def test_check_grad(self):
         # TODO(qingqing) remove folowing lines after the check_grad is refined.
@@ -197,7 +197,9 @@ class TestLstmOp(OpTest):
         self.outputs['BatchCellPreAct'] = np.zeros(
             (N, self.D)).astype('float64')
         self.check_grad(
-            ['Input', 'Weight', 'Bias'], ['Hidden'], max_relative_error=5e-4)
+            ['Input', 'Weight', 'Bias'], ['Hidden'],
+            max_relative_error=5e-4,
+            check_dygraph=False)
 
 
 class TestLstmOpCase1(TestLstmOp):
diff --git a/python/paddle/fluid/tests/unittests/test_lstmp_op.py b/python/paddle/fluid/tests/unittests/test_lstmp_op.py
index 70a0af6c9854efdf4d8b7c849c15e7aff6935fb2..8d5f051680028bd31df0605a62ee88f0b0a2d3c8 100644
--- a/python/paddle/fluid/tests/unittests/test_lstmp_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lstmp_op.py
@@ -185,7 +185,7 @@ class TestLstmpOp(LstmTest.TestLstmOp):
         }
 
     def test_check_output(self):
-        self.check_output(atol=1e-8)
+        self.check_output(atol=1e-8, check_dygraph=False)
 
     def test_check_grad(self):
         # TODO(qingqing) remove folowing lines after the check_grad is refined.
@@ -197,7 +197,8 @@ class TestLstmpOp(LstmTest.TestLstmOp):
         self.check_grad(
             ['Input', 'Weight', 'ProjWeight', 'Bias'], ['Projection'],
             max_relative_error=1e-2,
-            numeric_grad_delta=0.0000005)
+            numeric_grad_delta=0.0000005,
+            check_dygraph=False)
 
 
 class TestLstmpOpHasInitial(TestLstmpOp):
@@ -215,7 +216,8 @@ class TestLstmpOpHasInitial(TestLstmpOp):
             ['Input', 'Weight', 'ProjWeight', 'Bias', 'H0', 'C0'],
             ['Projection'],
             numeric_grad_delta=0.0000005,
-            max_relative_error=1e-2)
+            max_relative_error=1e-2,
+            check_dygraph=False)
 
     def test_check_grad_ingore_bias(self):
         N = len(self.lod[0])
@@ -227,7 +229,8 @@ class TestLstmpOpHasInitial(TestLstmpOp):
             ['Input', 'ProjWeight', 'Weight'], ['Projection'],
             max_relative_error=1e-2,
             numeric_grad_delta=0.0000005,
-            no_grad_set=set('Bias'))
+            no_grad_set=set('Bias'),
+            check_dygraph=False)
 
     def test_check_grad_ingore_weight(self):
         N = len(self.lod[0])
@@ -239,7 +242,8 @@ class TestLstmpOpHasInitial(TestLstmpOp):
             ['Input', 'ProjWeight', 'Bias'], ['Projection'],
             max_relative_error=1e-2,
             numeric_grad_delta=0.0000005,
-            no_grad_set=set('Weight'))
+            no_grad_set=set('Weight'),
+            check_dygraph=False)
 
     def test_check_grad_ingore_proj_weight(self):
         N = len(self.lod[0])
@@ -251,7 +255,8 @@ class TestLstmpOpHasInitial(TestLstmpOp):
             ['Input', 'Weight', 'Bias'], ['Projection'],
             max_relative_error=1e-2,
             numeric_grad_delta=0.0000005,
-            no_grad_set=set('ProjWeight'))
+            no_grad_set=set('ProjWeight'),
+            check_dygraph=False)
 
     def test_check_grad_ingore_input(self):
         N = len(self.lod[0])
@@ -263,7 +268,8 @@ class TestLstmpOpHasInitial(TestLstmpOp):
             ['Weight', 'ProjWeight', 'Bias'], ['Projection'],
             max_relative_error=1e-2,
             numeric_grad_delta=0.0000005,
-            no_grad_set=set('Input'))
+            no_grad_set=set('Input'),
+            check_dygraph=False)
 
     def test_check_grad_ingore_h0(self):
         N = len(self.lod[0])
@@ -275,7 +281,8 @@ class TestLstmpOpHasInitial(TestLstmpOp):
             ['Input', 'Weight', 'ProjWeight', 'Bias', 'C0'], ['Projection'],
             max_relative_error=1e-2,
             numeric_grad_delta=0.0000005,
-            no_grad_set=set('H0'))
+            no_grad_set=set('H0'),
+            check_dygraph=False)
 
     def test_check_grad_ingore_c0(self):
         N = len(self.lod[0])
@@ -287,7 +294,8 @@ class TestLstmpOpHasInitial(TestLstmpOp):
             ['Input', 'Weight', 'ProjWeight', 'Bias', 'H0'], ['Projection'],
             max_relative_error=1e-2,
             numeric_grad_delta=0.0000005,
-            no_grad_set=set('C0'))
+            no_grad_set=set('C0'),
+            check_dygraph=False)
 
 
 class TestLstmpOpRerverse(TestLstmpOp):
diff --git a/python/paddle/fluid/tests/unittests/test_match_matrix_tensor_op.py b/python/paddle/fluid/tests/unittests/test_match_matrix_tensor_op.py
index 9487f6ed1d38baf20354e676587895214eee8cc5..8d59b25e256d9c70432ef829e035a138d0cabfe4 100644
--- a/python/paddle/fluid/tests/unittests/test_match_matrix_tensor_op.py
+++ b/python/paddle/fluid/tests/unittests/test_match_matrix_tensor_op.py
@@ -71,10 +71,11 @@ class TestMatchMatrixTensorOp(OpTest):
         self.outputs = {'Out': (out, out_lod), 'Tmp': tmp}
 
     def test_check_output(self):
-        self.check_output(check_compile_vs_runtime=True)
+        self.check_output(check_compile_vs_runtime=True, check_dygraph=False)
 
     def test_check_grad(self):
-        self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.005)
+        self.check_grad(
+            ['X', 'Y'], 'Out', max_relative_error=0.005, check_dygraph=False)
 
 
 class TestMatchMatrixTensorOpCase1(TestMatchMatrixTensorOp):
diff --git a/python/paddle/fluid/tests/unittests/test_one_hot_op.py b/python/paddle/fluid/tests/unittests/test_one_hot_op.py
index 62184f771942b2f94b65ffd2f2253e1121d15f9d..0ff44058d44660f68182743ddf29cc11f1025850 100644
--- a/python/paddle/fluid/tests/unittests/test_one_hot_op.py
+++ b/python/paddle/fluid/tests/unittests/test_one_hot_op.py
@@ -45,7 +45,7 @@ class TestOneHotOp(OpTest):
         self.outputs = {'Out': (out, x_lod)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
 
 class TestOneHotOp_attr(OpTest):
@@ -68,7 +68,7 @@ class TestOneHotOp_attr(OpTest):
         self.outputs = {'Out': (out, x_lod)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
 
 class TestOneHotOp_default_dtype(OpTest):
@@ -92,7 +92,7 @@ class TestOneHotOp_default_dtype(OpTest):
         self.outputs = {'Out': (out, x_lod)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
 
 class TestOneHotOp_default_dtype_attr(OpTest):
@@ -115,7 +115,7 @@ class TestOneHotOp_default_dtype_attr(OpTest):
         self.outputs = {'Out': (out, x_lod)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
 
 class TestOneHotOp_out_of_range(OpTest):
@@ -134,7 +134,7 @@ class TestOneHotOp_out_of_range(OpTest):
         self.outputs = {'Out': (out, x_lod)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
 
 class TestOneHotOp_exception(OpTest):
diff --git a/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py b/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py
index dc948c42bc6d6a568f99e8c709514e4196c5a81c..73fd00e914dce384047cf82989db044d1c9a0b1f 100644
--- a/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py
@@ -44,7 +44,7 @@ class TestOneHotOp(OpTest):
         self.outputs = {'Out': (out, x_lod)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
 
 class TestOneHotOp_attr(OpTest):
@@ -67,7 +67,7 @@ class TestOneHotOp_attr(OpTest):
         self.outputs = {'Out': (out, x_lod)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
 
 class TestOneHotOp_default_dtype(OpTest):
@@ -90,7 +90,7 @@ class TestOneHotOp_default_dtype(OpTest):
         self.outputs = {'Out': (out, x_lod)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
 
 class TestOneHotOp_default_dtype_attr(OpTest):
@@ -113,7 +113,7 @@ class TestOneHotOp_default_dtype_attr(OpTest):
         self.outputs = {'Out': (out, x_lod)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
 
 class TestOneHotOp_out_of_range(OpTest):
@@ -131,7 +131,7 @@ class TestOneHotOp_out_of_range(OpTest):
         self.outputs = {'Out': (out, x_lod)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
 
 class TestOneHotOp_exception(OpTest):
diff --git a/python/paddle/fluid/tests/unittests/test_row_conv_op.py b/python/paddle/fluid/tests/unittests/test_row_conv_op.py
index 301d05260e0ae0852f420565edbffc77c51e1b38..74dbc7886caf8be6049d45172a1d0699c3a85c47 100644
--- a/python/paddle/fluid/tests/unittests/test_row_conv_op.py
+++ b/python/paddle/fluid/tests/unittests/test_row_conv_op.py
@@ -60,18 +60,30 @@ class TestRowConvOp1(OpTest):
         self.outputs = {'Out': (out, lod)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Filter'], 'Out', max_relative_error=0.05)
+        self.check_grad(
+            ['X', 'Filter'],
+            'Out',
+            max_relative_error=0.05,
+            check_dygraph=False)
 
     def test_check_grad_ignore_x(self):
         self.check_grad(
-            ['Filter'], 'Out', max_relative_error=0.05, no_grad_set=set('X'))
+            ['Filter'],
+            'Out',
+            max_relative_error=0.05,
+            no_grad_set=set('X'),
+            check_dygraph=False)
 
     def test_check_grad_ignore_wt(self):
         self.check_grad(
-            ['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Filter'))
+            ['X'],
+            'Out',
+            max_relative_error=0.05,
+            no_grad_set=set('Filter'),
+            check_dygraph=False)
 
 
 class TestRowConvOp2(OpTest):
@@ -91,21 +103,33 @@ class TestRowConvOp2(OpTest):
         self.outputs = {'Out': (out, lod)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
     #max_relative_error is increased from 0.05 to 0.06 as for higher
     #dimensional input, the dX on CPU for some values has max_rel_error
     #slightly more than 0.05
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Filter'], 'Out', max_relative_error=0.06)
+        self.check_grad(
+            ['X', 'Filter'],
+            'Out',
+            max_relative_error=0.06,
+            check_dygraph=False)
 
     def test_check_grad_ignore_x(self):
         self.check_grad(
-            ['Filter'], 'Out', max_relative_error=0.06, no_grad_set=set('X'))
+            ['Filter'],
+            'Out',
+            max_relative_error=0.06,
+            no_grad_set=set('X'),
+            check_dygraph=False)
 
     def test_check_grad_ignore_wt(self):
         self.check_grad(
-            ['X'], 'Out', max_relative_error=0.06, no_grad_set=set('Filter'))
+            ['X'],
+            'Out',
+            max_relative_error=0.06,
+            no_grad_set=set('Filter'),
+            check_dygraph=False)
 
 
 def row_conv_foward_Tensor(x, wt):
@@ -141,18 +165,30 @@ class TestRowOpWithTensorInput(OpTest):
         self.outputs = {'Out': out}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
     def test_check_grad_ignore_x(self):
         self.check_grad(
-            ['Filter'], 'Out', max_relative_error=0.05, no_grad_set=set('X'))
+            ['Filter'],
+            'Out',
+            max_relative_error=0.05,
+            no_grad_set=set('X'),
+            check_dygraph=False)
 
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Filter'], 'Out', max_relative_error=0.05)
+        self.check_grad(
+            ['X', 'Filter'],
+            'Out',
+            max_relative_error=0.05,
+            check_dygraph=False)
 
     def test_check_grad_ignore_wt(self):
         self.check_grad(
-            ['X'], 'Out', max_relative_error=0.05, no_grad_set=set('Filter'))
+            ['X'],
+            'Out',
+            max_relative_error=0.05,
+            no_grad_set=set('Filter'),
+            check_dygraph=False)
 
 
 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/test_seq_pool.py b/python/paddle/fluid/tests/unittests/test_seq_pool.py
index 2de5d0345912ace44858de1be52dece846ef879a..7591a1b313a598445b5e65fa49bd257815910775 100644
--- a/python/paddle/fluid/tests/unittests/test_seq_pool.py
+++ b/python/paddle/fluid/tests/unittests/test_seq_pool.py
@@ -77,7 +77,7 @@ class TestSeqAvgPool(OpTest):
         self.outputs = {'Out': (out, [self.set_lod()[0]])}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
     def test_check_grad(self):
         # Remove MaxIndex after check_grad is refined.
@@ -85,7 +85,7 @@ class TestSeqAvgPool(OpTest):
         if isinstance(out, tuple): out = out[0]
         self.outputs['MaxIndex'] = \
             np.zeros(out.shape).astype('int32')
-        self.check_grad(["X"], "Out")
+        self.check_grad(["X"], "Out", check_dygraph=False)
 
 
 class TestSeqAvgPoolLen0(TestSeqAvgPool):
@@ -298,7 +298,8 @@ class TestSeqSqrtPool2D(TestSeqAvgPool2D):
             out = out[0]
         self.outputs['MaxIndex'] = \
             np.zeros(out.shape).astype('int32')
-        self.check_grad(["X"], "Out", max_relative_error=0.06)
+        self.check_grad(
+            ["X"], "Out", max_relative_error=0.06, check_dygraph=False)
 
 
 class TestSeqSqrtPool2DLen0(TestSeqSqrtPool2D):
diff --git a/python/paddle/fluid/tests/unittests/test_sequence_expand.py b/python/paddle/fluid/tests/unittests/test_sequence_expand.py
index 1e4d1119789533eb020f102bb1b08f00311ceae1..8a27f7238c94211c1dd4649d17793f4f5fcde56b 100644
--- a/python/paddle/fluid/tests/unittests/test_sequence_expand.py
+++ b/python/paddle/fluid/tests/unittests/test_sequence_expand.py
@@ -72,10 +72,10 @@ class TestSequenceExpand(OpTest):
         self.compute()
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
     def test_check_grad(self):
-        self.check_grad(["X"], "Out")
+        self.check_grad(["X"], "Out", check_dygraph=False)
 
 
 class TestSequenceExpandCase1(TestSequenceExpand):
diff --git a/python/paddle/fluid/tests/unittests/test_sequence_expand_as.py b/python/paddle/fluid/tests/unittests/test_sequence_expand_as.py
index 30c487eea3dfb2c5d2349a00e62d91a7b7fdc013..cf258a5e5cbc296fbcaabe63c6bec2463cef6922 100644
--- a/python/paddle/fluid/tests/unittests/test_sequence_expand_as.py
+++ b/python/paddle/fluid/tests/unittests/test_sequence_expand_as.py
@@ -49,10 +49,10 @@ class TestSequenceExpandAs(OpTest):
         self.outputs = {'Out': (out_data, y_lod)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
     def test_check_grad(self):
-        self.check_grad(["X"], "Out")
+        self.check_grad(["X"], "Out", check_dygraph=False)
 
 
 class TestSequenceExpandAsCase1(TestSequenceExpandAs):
diff --git a/python/paddle/fluid/tests/unittests/test_sequence_pad_op.py b/python/paddle/fluid/tests/unittests/test_sequence_pad_op.py
index 1791df350c1d1fe4ee60cb0863051c86ed93e0ad..3b4701e4ca6c6efba271fa401a115b276485afba 100644
--- a/python/paddle/fluid/tests/unittests/test_sequence_pad_op.py
+++ b/python/paddle/fluid/tests/unittests/test_sequence_pad_op.py
@@ -72,10 +72,10 @@ class TestSequencePadOp(OpTest):
         self.compute()
 
     def test_check_output(self):
-        self.check_output(check_compile_vs_runtime=True)
+        self.check_output(check_compile_vs_runtime=True, check_dygraph=False)
 
     def test_check_grad(self):
-        self.check_grad(["X"], "Out")
+        self.check_grad(["X"], "Out", check_dygraph=False)
 
 
 class TestSequencePadOp2(TestSequencePadOp):
diff --git a/python/paddle/fluid/tests/unittests/test_sequence_reverse.py b/python/paddle/fluid/tests/unittests/test_sequence_reverse.py
index 09fb068ae6682be3d0f6506841eb8efceea7b61c..2b8f9860664f4c87f3459bbce839ffb1c5186ac9 100644
--- a/python/paddle/fluid/tests/unittests/test_sequence_reverse.py
+++ b/python/paddle/fluid/tests/unittests/test_sequence_reverse.py
@@ -47,10 +47,10 @@ class TestSequenceReverseBase(OpTest):
         return np.reshape(tmp_y, newshape=self.x.shape).astype(self.dtype)
 
     def test_output(self):
-        self.check_output(0)
+        self.check_output(0, check_dygraph=False)
 
     def test_grad(self):
-        self.check_grad(['X'], 'Y')
+        self.check_grad(['X'], 'Y', check_dygraph=False)
 
 
 class TestSequenceReserve1(TestSequenceReverseBase):
diff --git a/python/paddle/fluid/tests/unittests/test_sequence_scatter_op.py b/python/paddle/fluid/tests/unittests/test_sequence_scatter_op.py
index 4ffe2c2a12bc12eaa4f6ddb860f977de1265cb54..d3ad734d818b542c881848576853e05c63e295d9 100644
--- a/python/paddle/fluid/tests/unittests/test_sequence_scatter_op.py
+++ b/python/paddle/fluid/tests/unittests/test_sequence_scatter_op.py
@@ -47,10 +47,10 @@ class TestSequenceScatterOp(OpTest):
         self.outputs = {'Out': Out_data}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
     def test_check_grad(self):
-        self.check_grad(['Updates'], 'Out', in_place=True)
+        self.check_grad(['Updates'], 'Out', in_place=True, check_dygraph=False)
 
 
 class TestSequenceScatterOpSeqLen0(TestSequenceScatterOp):
diff --git a/python/paddle/fluid/tests/unittests/test_sequence_unpad_op.py b/python/paddle/fluid/tests/unittests/test_sequence_unpad_op.py
index ec63b87bfaee09853be8d598ef334eee237f1192..91f0ff713a877c3db6b562ba4be9d5e7b0251050 100644
--- a/python/paddle/fluid/tests/unittests/test_sequence_unpad_op.py
+++ b/python/paddle/fluid/tests/unittests/test_sequence_unpad_op.py
@@ -48,10 +48,10 @@ class TestSequenceUnpadOp(OpTest):
         self.compute()
 
     def test_check_output(self):
-        self.check_output(check_compile_vs_runtime=True)
+        self.check_output(check_compile_vs_runtime=True, check_dygraph=False)
 
     def test_check_grad(self):
-        self.check_grad(["X"], "Out")
+        self.check_grad(["X"], "Out", check_dygraph=False)
 
 
 class TestSequenceUnpadOp2(TestSequenceUnpadOp):
diff --git a/python/paddle/fluid/tests/unittests/test_var_conv_2d.py b/python/paddle/fluid/tests/unittests/test_var_conv_2d.py
index 60a0dcae8541a0a40da8af5836d1859aa6e62340..847fdd3349d41345fc74dba66e8bef9f8e773d38 100644
--- a/python/paddle/fluid/tests/unittests/test_var_conv_2d.py
+++ b/python/paddle/fluid/tests/unittests/test_var_conv_2d.py
@@ -172,10 +172,11 @@ class TestVarConv2dOp(OpTest):
         return col_res, col_res_lod
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', max_relative_error=0.005)
+        self.check_grad(
+            ['X'], 'Out', max_relative_error=0.005, check_dygraph=False)
 
 
 class TestVarConv2dOpCase1(TestVarConv2dOp):
diff --git a/python/paddle/fluid/tests/unittests/test_warpctc_op.py b/python/paddle/fluid/tests/unittests/test_warpctc_op.py
index 3bd074f4d01fa37959f240b2add1491fe377a1f8..36cad1db4d2f10102f3b9e98ff9552b9f5d937ec 100644
--- a/python/paddle/fluid/tests/unittests/test_warpctc_op.py
+++ b/python/paddle/fluid/tests/unittests/test_warpctc_op.py
@@ -221,11 +221,12 @@ class TestWarpCTCOp(OpTest):
         }
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
     def test_check_grad(self):
         self.outputs['WarpCTCGrad'] = self.gradient
-        self.check_grad(["Logits"], "Loss", max_relative_error=0.007)
+        self.check_grad(
+            ["Logits"], "Loss", max_relative_error=0.007, check_dygraph=False)
 
 
 class TestWarpCTCOpCase1(TestWarpCTCOp):
@@ -314,11 +315,12 @@ class TestWarpCTCOpWithPadding(OpTest):
         }
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)
 
     def test_check_grad(self):
         self.outputs['WarpCTCGrad'] = self.gradient
-        self.check_grad(["Logits"], "Loss", max_relative_error=0.007)
+        self.check_grad(
+            ["Logits"], "Loss", max_relative_error=0.007, check_dygraph=False)
 
 
 class TestWarpCTCOpWithPaddingCase1(TestWarpCTCOpWithPadding):