diff --git a/cmake/util.cmake b/cmake/util.cmake
index e814cad36f2a8ce95a2dc9fabc35cb39506d4cd7..ac911052eb970c5a3e485e3178dd788b1517ca30 100644
--- a/cmake/util.cmake
+++ b/cmake/util.cmake
@@ -25,7 +25,7 @@ function(target_circle_link_libraries TARGET_NAME)
       endif()
     endforeach()
     if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")
-      if(IOS AND NOT IOS_ENABLE_BITCODE)
+      if(NOT IOS_ENABLE_BITCODE)
         list(APPEND LIBS "-undefined dynamic_lookup")
       endif()
     endif()
diff --git a/doc/faq/index_cn.rst b/doc/faq/index_cn.rst
index 00192aa69bd487787a8743d5589a365eacbd4ff3..acbf4c87ae5242f6cfc593a7fddc649ee3a70d7c 100644
--- a/doc/faq/index_cn.rst
+++ b/doc/faq/index_cn.rst
@@ -158,17 +158,23 @@ PaddlePaddle uses the name :code:`name` as a parameter's ID; parameters with the
 Here :code:`hidden_a` and :code:`hidden_b` use the same parameter and bias, and the two inputs
 of the softmax layer also use the same parameter :code:`softmax_param`.
 
-7. \*-cp27mu-linux_x86_64.whl is not a supported wheel on this platform.
+7. paddlepaddle\*.whl is not a supported wheel on this platform.
 ------------------------------------------------------------------------
 
-The main cause of this problem is that the :code:`wheel` package used to build the wheel was up to date,
-while the :code:`pip` package on the system was too old. The fix is to upgrade :code:`pip` and rebuild PaddlePaddle.
+The main cause of this problem is that no paddlepaddle package matching the current system was found. The latest paddlepaddle Python packages support Linux x86_64 and MacOS 10.12, with python 2.7 and pip 9.0.1 installed.
+
 To upgrade the :code:`pip` package:
 
 .. code-block:: bash
 
     pip install --upgrade pip
 
+If that still fails, run :code:`python -c "import pip; print(pip.pep425tags.get_supported())"` to list the package tags supported
+by the current system, and check whether they match the tag of the package being installed.
+
+If the system supports :code:`linux_x86_64` while the package is tagged :code:`manylinux1_x86_64`, upgrade pip to the latest version;
+if the system supports :code:`manylinux1_x86_64` while the (local) package is tagged :code:`linux_x86_64`, rename the whl file to use :code:`manylinux1_x86_64` and install it again.
+
 8. None of the Python-related unit tests pass
 ---------------------------------------------
@@ -310,7 +316,7 @@ Paddle binaries catch floating-point exceptions at runtime; as soon as one occurs
 * The model keeps failing to converge and diverges to very large values.
 * The training data is corrupted, driving the parameters into singular states; or the input data has too large a scale, with some feature values reaching the millions, so matrix multiplication can overflow the floating-point range.
 
-The main remedies are to reduce the learning rat or to normalize the data.
+The main remedies are to reduce the learning rate or to normalize the data.
 
 15. After building and installing, import paddle.v2 as paddle raises ImportError: No module named v2
 ------------------------------------------------------------------------
@@ -373,3 +379,15 @@ The model parameter file saved by PaddlePaddle consists of a 16-byte header and the
 
     parameters = paddle.parameters.create(my_cost)
     parameters.set('emb', load_parameter(emb_param_file, 30000, 256))
+
+18. In multi-node cluster training, the logs contain only network communication errors
+---------------------------------------------------------------------------------------
+
+In multi-node cluster training, the logs report network communication errors such as :code:`Connection reset by peer`.
+Such errors are usually caused by one node failing, which makes its training process exit and leaves the other nodes unable to connect. Troubleshoot with the following steps:
+
+* In :code:`train.log` and :code:`server.log`, locate the earliest error and check whether it was triggered by some other failure (e.g. FPE, out of memory, or insufficient disk space).
+
+* If the earliest error really is a network communication problem, it is most likely a port conflict caused by non-exclusive execution. Contact the cluster operators to check whether the current MPI cluster supports submitting with resource=full; if it does, resubmit with this parameter and change the job port.
+
+* If the current MPI cluster does not support exclusive mode, ask the operators whether the job can be moved to another cluster or the current one can be upgraded.
\ No newline at end of file
diff --git a/paddle/framework/attribute.cc b/paddle/framework/attribute.cc
index 27132eaa0b3b0666fc042faf052dac2e169ba9e7..159ed03b92bbc57ab79734de832845ef1f367de9 100644
--- a/paddle/framework/attribute.cc
+++ b/paddle/framework/attribute.cc
@@ -19,6 +19,15 @@ limitations under the License. */
 namespace paddle {
 namespace framework {
 
+static ProgramDesc* g_program_desc = nullptr;
+
+ProgramDesc& GetProgramDesc() {
+  if (g_program_desc == nullptr) {
+    g_program_desc = new ProgramDesc();
+  }
+  return *g_program_desc;
+}
+
 template <>
 AttrType AttrTypeID<int>() {
   return INT;
 }
@@ -47,40 +56,44 @@ template <>
 AttrType AttrTypeID<std::vector<std::pair<int, int>>>() {
   return INT_PAIRS;
 }
+template <>
+AttrType AttrTypeID<BlockDesc>() {
+  return BLOCK;
+}
 
 Attribute GetAttrValue(const OpDesc::Attr& attr_desc) {
   switch (attr_desc.type()) {
-    case paddle::framework::AttrType::INT: {
+    case framework::AttrType::INT: {
       return attr_desc.i();
     }
-    case paddle::framework::AttrType::FLOAT: {
+    case framework::AttrType::FLOAT: {
       return attr_desc.f();
     }
-    case paddle::framework::AttrType::STRING: {
+    case framework::AttrType::STRING: {
       return attr_desc.s();
     }
-    case paddle::framework::AttrType::INTS: {
+    case framework::AttrType::INTS: {
       std::vector<int> val(attr_desc.ints_size());
       for (int i = 0; i < attr_desc.ints_size(); ++i) {
         val[i] = attr_desc.ints(i);
       }
       return val;
     }
-    case paddle::framework::AttrType::FLOATS: {
+    case framework::AttrType::FLOATS: {
       std::vector<float> val(attr_desc.floats_size());
       for (int i = 0; i < attr_desc.floats_size(); ++i) {
         val[i] = attr_desc.floats(i);
       }
       return val;
     }
-    case paddle::framework::AttrType::STRINGS: {
+    case framework::AttrType::STRINGS: {
       std::vector<std::string> val(attr_desc.strings_size());
       for (int i = 0; i < attr_desc.strings_size(); ++i) {
         val[i] = attr_desc.strings(i);
       }
       return val;
     }
-    case paddle::framework::AttrType::INT_PAIRS: {
+    case framework::AttrType::INT_PAIRS: {
       std::vector<std::pair<int, int>> val(attr_desc.int_pairs_size());
       for (int i = 0; i < attr_desc.int_pairs_size(); ++i) {
         val[i].first = attr_desc.int_pairs(i).first();
@@ -88,6 +101,9 @@ Attribute GetAttrValue(const OpDesc::Attr& attr_desc) {
       }
       return val;
     }
+    case framework::AttrType::BLOCK: {
+      return GetProgramDesc().mutable_blocks(attr_desc.block_idx());
+    }
   }
   PADDLE_ENFORCE(false, "Unknown OpDesc::AttrDesc::type !");
   return boost::blank();
diff --git a/paddle/framework/attribute.h b/paddle/framework/attribute.h
index 2b788a76cafe198abb9aed8ba842e37cc6ff73a6..6735ca0a8d70d04dae9dd610742370e83472e9b0 100644
--- a/paddle/framework/attribute.h
+++ b/paddle/framework/attribute.h
@@ -29,11 +29,13 @@ namespace framework {
 
 typedef boost::variant<boost::blank, int, float, std::string, std::vector<int>,
                        std::vector<float>, std::vector<std::string>,
-                       std::vector<std::pair<int, int>>>
+                       std::vector<std::pair<int, int>>, BlockDesc*>
     Attribute;
 
 typedef std::unordered_map<std::string, Attribute> AttributeMap;
 
+ProgramDesc& GetProgramDesc();
+
 template <typename T>
 AttrType AttrTypeID();
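
A minimal sketch of how the widened Attribute variant might be consumed once framework.proto is compiled. GetAttrValue and the BLOCK case come from the patch above; the helper name FetchSubBlock and the assumption that a block is already registered in the global ProgramDesc are hypothetical.

    // Sketch only: assumes the generated protobuf types and a ProgramDesc
    // that already holds the referenced block (see GetProgramDesc()).
    #include "paddle/framework/attribute.h"

    paddle::framework::BlockDesc* FetchSubBlock(
        const paddle::framework::OpDesc::Attr& attr_desc) {
      // For AttrType::BLOCK, GetAttrValue() returns a BlockDesc* pointing
      // into the global ProgramDesc, so the variant unpacks with boost::get.
      paddle::framework::Attribute attr =
          paddle::framework::GetAttrValue(attr_desc);
      return boost::get<paddle::framework::BlockDesc*>(attr);
    }
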
diff --git a/paddle/framework/backward.cc b/paddle/framework/backward.cc
index c5d46622156c56acb98fb77e7db5ee7bca8c937a..0ec18de5b8a0e7cebdb91c30d2b45596b02dfa51 100644
--- a/paddle/framework/backward.cc
+++ b/paddle/framework/backward.cc
@@ -166,9 +166,8 @@ static std::unique_ptr<OperatorBase> BackwardRecursive(
         // If part of input gradient of that operator is not calculated, fill
         // zero variables to that input gradient.
-        net->AppendOp(OpRegistry::CreateOp("fill_zeros_like",
-                                           {{"Src", {prefix}}},
-                                           {{"Dst", {grad_input}}}, {}));
+        net->AppendOp(OpRegistry::CreateOp("fill_zeros_like", {{"X", {prefix}}},
+                                           {{"Y", {grad_input}}}, {}));
       }
       return false;
     });
diff --git a/paddle/framework/backward_test.cc b/paddle/framework/backward_test.cc
index ad8003420dc14538d0dae9a1cb19d6459b154576..6932f5b989a3e21ebc44ec4fec9f5223f2547d7a 100644
--- a/paddle/framework/backward_test.cc
+++ b/paddle/framework/backward_test.cc
@@ -127,8 +127,8 @@ class FillZeroOpMaker : public OpProtoAndCheckerMaker {
  public:
   FillZeroOpMaker(OpProto *proto, OpAttrChecker *op_checker)
       : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("Src", "x");
-    AddOutput("Dst", "out");
+    AddInput("X", "x");
+    AddOutput("Y", "out");
     AddComment("");
   }
 };
@@ -325,10 +325,10 @@ TEST(Backward, op_part_of_output_are_not_need) {
   auto &fill_zero = *net->ops_[0];
   ASSERT_EQ("fill_zeros_like", fill_zero.Type());
-  ASSERT_EQ(1UL, fill_zero.Inputs("Src").size());
-  ASSERT_EQ("Z", fill_zero.Input("Src"));
-  ASSERT_EQ(1UL, fill_zero.Outputs("Dst").size());
-  ASSERT_EQ(std::string("Z") + f::kZeroVarSuffix, fill_zero.Output("Dst"));
+  ASSERT_EQ(1UL, fill_zero.Inputs("X").size());
+  ASSERT_EQ("Z", fill_zero.Input("X"));
+  ASSERT_EQ(1UL, fill_zero.Outputs("Y").size());
+  ASSERT_EQ(std::string("Z") + f::kZeroVarSuffix, fill_zero.Output("Y"));
 
   auto &d_many_out = *net->ops_[1];
   ASSERT_EQ("many_output_op_grad", d_many_out.Type());
diff --git a/paddle/framework/framework.proto b/paddle/framework/framework.proto
index dfcb5fb6210a08f35193b83e3b5f7cee92f618d7..89a49f69062486ace67154f52450e7449a948851 100644
--- a/paddle/framework/framework.proto
+++ b/paddle/framework/framework.proto
@@ -23,6 +23,7 @@ enum AttrType {
   FLOATS = 4;
   STRINGS = 5;
   INT_PAIRS = 6;
+  BLOCK = 7;
 }
 
 message IntPair {
@@ -44,6 +45,7 @@ message OpDesc {
     repeated float floats = 7;
     repeated string strings = 8;
     repeated IntPair int_pairs = 9;
+    optional int32 block_idx = 10;
   };
 
   message Var {
@@ -108,3 +110,12 @@ message VarDesc {
   required string name = 1;
   optional LoDTensorDesc lod_tensor = 2;
 }
+
+message BlockDesc {
+  required int32 idx = 1;
+  required int32 parent_idx = 2;
+  repeated VarDesc vars = 3;
+  repeated OpDesc ops = 4;
+}
+
+message ProgramDesc { repeated BlockDesc blocks = 1; }
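
A short sketch of how the new ProgramDesc and BlockDesc messages could be populated through the generated protobuf API; the setters follow directly from the message definitions above, while the function AddRootBlock and the choice of -1 as the root's parent index are assumptions for illustration.

    #include "paddle/framework/attribute.h"  // declares GetProgramDesc()

    void AddRootBlock() {
      auto& program = paddle::framework::GetProgramDesc();
      // add_blocks() appends a BlockDesc to the repeated `blocks` field.
      auto* block = program.add_blocks();
      block->set_idx(0);
      block->set_parent_idx(-1);  // assumed sentinel for "no parent"
      // An OpDesc::Attr can now reference this block:
      //   attr->set_type(paddle::framework::AttrType::BLOCK);
      //   attr->set_block_idx(0);
    }
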
nullptr + : var->GetMutable(); }); return res; } diff --git a/paddle/framework/operator.h b/paddle/framework/operator.h index 1a78b6d1e146d2d157e353c5729d8518ee264517..2d6d5510ef6dc83f1a016be6ff123f0b9bcaf230 100644 --- a/paddle/framework/operator.h +++ b/paddle/framework/operator.h @@ -212,9 +212,9 @@ class InferShapeContext { return res; } - std::vector MultiOutputVar(const std::string& name) const { + std::vector MultiOutputVar(const std::string& name) const { auto names = op_.Outputs(name); - std::vector res; + std::vector res; res.reserve(names.size()); std::transform(names.begin(), names.end(), std::back_inserter(res), [this](const std::string& name) { @@ -271,6 +271,20 @@ class InferShapeContext { return &var->Get(); } + void ShareLoD(const std::string& in, const std::string& out, size_t i = 0, + size_t j = 0) const { + PADDLE_ENFORCE_LT(i, InputSize(in)); + PADDLE_ENFORCE_LT(j, OutputSize(out)); + auto* in_var = MultiInputVar(in)[i]; + auto* out_var = MultiOutputVar(out)[j]; + if (!in_var->IsType()) return; + PADDLE_ENFORCE(out_var->IsType(), + "The %d-th output of Output(%s) must be LoDTensor.", j, out); + auto in_tensor = in_var->Get(); + auto* out_tensor = out_var->GetMutable(); + out_tensor->set_lod(in_tensor.lod()); + } + private: const OperatorBase& op_; const Scope& scope_; @@ -283,6 +297,13 @@ template <> const std::vector InferShapeContext::MultiInput( const std::string& name) const; +template <> +Tensor* InferShapeContext::Output(const std::string& name) const; + +template <> +std::vector InferShapeContext::MultiOutput( + const std::string& name) const; + template struct EigenDeviceConverter; @@ -315,38 +336,10 @@ class ExecutionContext : public InferShapeContext { return device_context_; } - // redefine Output function, - // use Variable::Get instead of Variable::GetMutable - template - T* Output(const std::string& name) const { - auto var = OutputVar(name); - return var == nullptr ? nullptr : const_cast(&var->Get()); - } - - // redefine MultiOutput function. - // use Variable::Get instead of Variable::GetMutable - template - std::vector MultiOutput(const std::string& name) const { - auto names = op().Outputs(name); - std::vector res; - res.reserve(names.size()); - std::transform( - names.begin(), names.end(), std::back_inserter(res), - [&](const std::string& sub_name) { return Output(sub_name); }); - return res; - } - private: const platform::DeviceContext& device_context_; }; -template <> -Tensor* ExecutionContext::Output(const std::string& name) const; - -template <> -std::vector ExecutionContext::MultiOutput( - const std::string& name) const; - class OpKernel { public: /** diff --git a/paddle/operators/accuracy_op.cc b/paddle/operators/accuracy_op.cc index 0c813748b2989a8f0c00a359345747242dd21dd8..70e4f9da1221ab300e2b507a3da2f7c5da93f2e4 100644 --- a/paddle/operators/accuracy_op.cc +++ b/paddle/operators/accuracy_op.cc @@ -39,7 +39,8 @@ class AccuracyOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(inference->dims()[0], label->dims()[0], "inference size must be the same as label size"); - ctx.Output("Accuracy")->Resize({1}); + ctx.Output("Accuracy")->Resize({1}); + ctx.ShareLoD("Inference", /*->*/ "Accuracy"); } }; @@ -54,11 +55,15 @@ class AccuracyOpMaker : public framework::OpProtoAndCheckerMaker { // TODO(typhoonzero): AddInput("Weight", ... AddOutput("Accuracy", "The accuracy of current batch"); - AddComment( - R"DOC(Accuracy. It will print accuracy rate for classification. + AddComment(R"DOC( +Accuracy. 
diff --git a/paddle/operators/accuracy_op.cc b/paddle/operators/accuracy_op.cc
index 0c813748b2989a8f0c00a359345747242dd21dd8..70e4f9da1221ab300e2b507a3da2f7c5da93f2e4 100644
--- a/paddle/operators/accuracy_op.cc
+++ b/paddle/operators/accuracy_op.cc
@@ -39,7 +39,8 @@ class AccuracyOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(inference->dims()[0], label->dims()[0],
                       "inference size must be the same as label size");
 
-    ctx.Output<framework::LoDTensor>("Accuracy")->Resize({1});
+    ctx.Output<framework::Tensor>("Accuracy")->Resize({1});
+    ctx.ShareLoD("Inference", /*->*/ "Accuracy");
   }
 };
 
@@ -54,11 +55,15 @@ class AccuracyOpMaker : public framework::OpProtoAndCheckerMaker {
     // TODO(typhoonzero): AddInput("Weight", ...
     AddOutput("Accuracy", "The accuracy of current batch");
 
-    AddComment(
-        R"DOC(Accuracy. It will print accuracy rate for classification.
+    AddComment(R"DOC(
+Accuracy. It will print accuracy rate for classification.
 The accuracy is:
 .. math::
-accuracy = \\frac{NumOfCorrectPredicts}{NumOfAllSamples})DOC");
+accuracy = \\frac{NumOfCorrectPredicts}{NumOfAllSamples})
+
+Both the input `Inference` and `Label` can carry the LoD (Level of Details)
+information, or not. But the output only shares the LoD with input `Inference`.
+)DOC");
   }
 };
diff --git a/paddle/operators/activation_op.cc b/paddle/operators/activation_op.cc
index cc55767cef9552475321bcb8c06d74a8d91dc99b..06654702bc42cc7cf4917b00693334b1d36ce371 100644
--- a/paddle/operators/activation_op.cc
+++ b/paddle/operators/activation_op.cc
@@ -23,8 +23,9 @@ class ActivationOp : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
-    ctx.Output<framework::LoDTensor>("Y")->Resize(
+    ctx.Output<framework::Tensor>("Y")->Resize(
         ctx.Input<framework::Tensor>("X")->dims());
+    ctx.ShareLoD("X", /*->*/ "Y");
   }
 };
 
@@ -34,7 +35,7 @@ class ActivationOpGrad : public framework::OperatorWithKernel {
 
  protected:
   void InferShape(const framework::InferShapeContext &ctx) const override {
-    ctx.Output<framework::LoDTensor>(framework::GradVarName("X"))
+    ctx.Output<framework::Tensor>(framework::GradVarName("X"))
         ->Resize(ctx.Input<framework::Tensor>("Y")->dims());
   }
 };
diff --git a/paddle/operators/add_op.cc b/paddle/operators/add_op.cc
index e83c1efeaf897889d18a37a6bd2ca2f8f012db25..ed11d096974341022637676537793645f46738f0 100644
--- a/paddle/operators/add_op.cc
+++ b/paddle/operators/add_op.cc
@@ -33,7 +33,7 @@ class AddOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(ctx.Input<Tensor>("X")->dims(),
                       ctx.Input<Tensor>("Y")->dims(),
                       "Two input of Add Op's dimension must be same.");
-    ctx.Output<framework::LoDTensor>("Out")->Resize(
+    ctx.Output<framework::Tensor>("Out")->Resize(
         ctx.Input<Tensor>("X")->dims());
   }
 };
diff --git a/paddle/operators/clip_op.cc b/paddle/operators/clip_op.cc
index 86d79866a8e7c4cda036ce7e0f5527fd0086b482..e5a54bc4b226fd24337050fdd84b2de9c49f7949 100644
--- a/paddle/operators/clip_op.cc
+++ b/paddle/operators/clip_op.cc
@@ -17,8 +17,6 @@
 namespace paddle {
 namespace operators {
 
-using framework::LoDTensor;
-
 class ClipOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
@@ -29,11 +27,12 @@ class ClipOp : public framework::OperatorWithKernel {
                             "Input(X) of ClipOp should not be null.");
     PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
                             "Output(Out) of ClipOp should not be null.");
-    auto x_dims = ctx.Input<LoDTensor>("X")->dims();
+    auto x_dims = ctx.Input<Tensor>("X")->dims();
     auto max = Attr<float>("max");
     auto min = Attr<float>("min");
     PADDLE_ENFORCE_LT(min, max, "max should be greater than min.");
-    ctx.Output<LoDTensor>("Out")->Resize(x_dims);
+    ctx.Output<framework::Tensor>("Out")->Resize(x_dims);
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
 
@@ -66,8 +65,8 @@ class ClipOpGrad : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null");
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
                             "Input(Out@GRAD) should not be null");
-    auto x_dims = ctx.Input<LoDTensor>("X")->dims();
-    auto *x_grad = ctx.Output<LoDTensor>(framework::GradVarName("X"));
+    auto x_dims = ctx.Input<Tensor>("X")->dims();
+    auto *x_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
     if (x_grad != nullptr) {
       x_grad->Resize(x_dims);
     }
diff --git a/paddle/operators/concat_op.cc b/paddle/operators/concat_op.cc
index 223bb0ffe6e75ce71919eb5f4cca06bedbb00764..07f847079e834716904dcc038d2097efd268bd3e 100644
--- a/paddle/operators/concat_op.cc
+++ b/paddle/operators/concat_op.cc
@@ -29,7 +29,7 @@ class ConcatOp : public framework::OperatorWithKernel {
                             "Output(Out) of ConcatOp should not be null.");
 
     auto ins = ctx.MultiInput<framework::Tensor>("X");
-    auto *out = ctx.Output<framework::LoDTensor>("Out");
+    auto *out = ctx.Output<framework::Tensor>("Out");
     size_t axis = static_cast<size_t>(ctx.Attr<int>("axis"));
     size_t n = ins.size();
ctx.MultiInput("X"); - auto *out = ctx.Output("Out"); + auto *out = ctx.Output("Out"); size_t axis = static_cast(ctx.Attr("axis")); size_t n = ins.size(); diff --git a/paddle/operators/conv2d_op.cc b/paddle/operators/conv2d_op.cc index 12db65b5cbf224e95d91c7b4839afa552c084ee7..c3281db0964de6d7dd6be629fbcc55cabb9fef9d 100644 --- a/paddle/operators/conv2d_op.cc +++ b/paddle/operators/conv2d_op.cc @@ -37,7 +37,7 @@ class Conv2DOp : public framework::OperatorWithKernel { auto in = ctx.Input("Input"); auto filter = ctx.Input("Filter"); - auto out = ctx.Output("Output"); + auto out = ctx.Output("Output"); std::vector strides = Attr>("strides"); std::vector paddings = Attr>("paddings"); int groups = Attr("groups"); @@ -111,10 +111,9 @@ class Conv2DOpGrad : public framework::OperatorWithKernel { void InferShape(const framework::InferShapeContext &ctx) const override { auto in = ctx.Input("Input"); auto filter = ctx.Input("Filter"); - auto d_in = - ctx.Output(framework::GradVarName("Input")); + auto d_in = ctx.Output(framework::GradVarName("Input")); auto d_filter = - ctx.Output(framework::GradVarName("Filter")); + ctx.Output(framework::GradVarName("Filter")); if (d_in) d_in->Resize(in->dims()); if (d_filter) d_filter->Resize(filter->dims()); } diff --git a/paddle/operators/cos_sim_op.cc b/paddle/operators/cos_sim_op.cc index 72c446493684246959656dc048e7f0e761665423..b56ee2047b811e212b4bf74bf7fbba753a6bcb11 100644 --- a/paddle/operators/cos_sim_op.cc +++ b/paddle/operators/cos_sim_op.cc @@ -54,9 +54,10 @@ class CosSimOp : public framework::OperatorWithKernel { " just 1 (which will be broadcasted to match Input(X))."); // resize tensor - ctx.Output("Out")->Resize({x_dims[0], 1}); - ctx.Output("XNorm")->Resize({x_dims[0], 1}); - ctx.Output("YNorm")->Resize({y_dims[0], 1}); + ctx.Output("Out")->Resize({x_dims[0], 1}); + ctx.Output("XNorm")->Resize({x_dims[0], 1}); + ctx.Output("YNorm")->Resize({y_dims[0], 1}); + ctx.ShareLoD("X", /*->*/ "Out"); } }; @@ -81,10 +82,13 @@ Cosine Similarity Operator. The equation is: Out = X^T * Y / (sqrt(X^T * X) * sqrt(Y^T * Y)). -Input(X) and Input(Y) must have the same shape, except that the 1st dimension -of Input(Y) could be just 1 (different from Input(X)), which will be -broadcasted to match the shape of Input(X) before computing their cosine +The input `X` and `Y` must have the same shape, except that the 1st dimension +of input `Y` could be just 1 (different from input `X`), which will be +broadcasted to match the shape of input `X` before computing their cosine similarity. + +Both the input `X` and `Y` can carry the LoD (Level of Details) information, +or not. But the output only shares the LoD with input `X`. 
)DOC"); } }; @@ -139,10 +143,8 @@ class CosSimOpGrad : public framework::OperatorWithKernel { "Shape of Input(Out@Grad) must be [X.Dim(0), 1]."); // resize tensor - auto *x_grad = - ctx.Output(framework::GradVarName("X")); - auto *y_grad = - ctx.Output(framework::GradVarName("Y")); + auto *x_grad = ctx.Output(framework::GradVarName("X")); + auto *y_grad = ctx.Output(framework::GradVarName("Y")); if (x_grad) x_grad->Resize(x_dims); if (y_grad) y_grad->Resize(y_dims); } diff --git a/paddle/operators/crop_op.cc b/paddle/operators/crop_op.cc index 7ed21f336f69e494f3c4039c609c83407a80cd8c..52a1123348b10e39bcfa1ba062c893e5f20ed862 100644 --- a/paddle/operators/crop_op.cc +++ b/paddle/operators/crop_op.cc @@ -19,7 +19,6 @@ namespace paddle { namespace operators { using framework::Tensor; -using framework::LoDTensor; class CropOp : public framework::OperatorWithKernel { public: @@ -31,9 +30,9 @@ class CropOp : public framework::OperatorWithKernel { "Input(X) of CropOp should not be null."); PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), "Output(Out) of CropOp should not be null."); - auto x_dim = ctx.Input("X")->dims(); - auto *y = ctx.Input("Y"); - auto *out = ctx.Output("Out"); + auto x_dim = ctx.Input("X")->dims(); + auto *y = ctx.Input("Y"); + auto *out = ctx.Output("Out"); if (y == nullptr) { auto shape = Attr>("shape"); PADDLE_ENFORCE_EQ( @@ -121,8 +120,8 @@ class CropOpGrad : public framework::OperatorWithKernel { PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) should not be null"); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), "Input(Out@GRAD) should not be null"); - auto x_dims = ctx.Input("X")->dims(); - auto *x_grad = ctx.Output(framework::GradVarName("X")); + auto x_dims = ctx.Input("X")->dims(); + auto *x_grad = ctx.Output(framework::GradVarName("X")); if (x_grad != nullptr) { x_grad->Resize(x_dims); } diff --git a/paddle/operators/cross_entropy_op.cc b/paddle/operators/cross_entropy_op.cc index 953367eb8bcd1282ab6c7e1189d778f0ce3da541..679f068c3d2bf0223ccf7bba82b003139f273125 100644 --- a/paddle/operators/cross_entropy_op.cc +++ b/paddle/operators/cross_entropy_op.cc @@ -17,8 +17,6 @@ limitations under the License. */ namespace paddle { namespace operators { -using framework::LoDTensor; - class CrossEntropyOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -51,7 +49,8 @@ class CrossEntropyOp : public framework::OperatorWithKernel { "Input(Label) must be 1."); } - ctx.Output("Y")->Resize({x->dims()[0], 1}); + ctx.Output("Y")->Resize({x->dims()[0], 1}); + ctx.ShareLoD("X", /*->*/ "Y"); } }; @@ -95,7 +94,7 @@ class CrossEntropyGradientOp : public framework::OperatorWithKernel { "Input(Label) must be 1."); } - auto dx = ctx.Output(framework::GradVarName("X")); + auto dx = ctx.Output(framework::GradVarName("X")); dx->Resize(x->dims()); } }; @@ -133,6 +132,9 @@ computation. As a special case of 2), when each row of Input(Label) has only one non-zero element (equals 1), soft-label cross-entropy degenerates to a one-hot cross-entropy with one-hot label representation. + +Both the input `X` and `Label` can carry the LoD (Level of Details) information, +or not. But the output only shares the LoD with input `X`. 
)DOC"); } }; diff --git a/paddle/operators/dropout_op.cc b/paddle/operators/dropout_op.cc index b111b9fccb2310bd5fb92bda878a497c51f62ce0..7a6351b61287eccb0454fe279ea9bf38ed055bdf 100644 --- a/paddle/operators/dropout_op.cc +++ b/paddle/operators/dropout_op.cc @@ -18,7 +18,6 @@ namespace paddle { namespace operators { using framework::Tensor; -using framework::LoDTensor; class DropoutOp : public framework::OperatorWithKernel { public: @@ -34,10 +33,11 @@ class DropoutOp : public framework::OperatorWithKernel { ctx.Attr("is_training") == 1); auto dims = ctx.Input("X")->dims(); - ctx.Output("Out")->Resize(dims); + ctx.Output("Out")->Resize(dims); if (ctx.Attr("is_training") == 1) { - ctx.Output("Mask")->Resize(dims); + ctx.Output("Mask")->Resize(dims); } + ctx.ShareLoD("X", /*->*/ "Out"); } }; @@ -96,7 +96,7 @@ class DropoutOpGrad : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(x_dims, mask_dims, "Dimensions of Input(X) and Mask must be the same."); - auto *x_grad = ctx.Output(framework::GradVarName("X")); + auto *x_grad = ctx.Output(framework::GradVarName("X")); x_grad->Resize(x_dims); } }; diff --git a/paddle/operators/elementwise_mul_op.cc b/paddle/operators/elementwise_mul_op.cc index ee6e975b443691bf71cec904565ced20406f3fba..02bd4c7b85790edba781f234f34bbec160844238 100644 --- a/paddle/operators/elementwise_mul_op.cc +++ b/paddle/operators/elementwise_mul_op.cc @@ -37,7 +37,8 @@ class ElementWiseMulOp : public framework::OperatorWithKernel { auto y_dim = ctx.Input("Y")->dims(); PADDLE_ENFORCE_GE(x_dim.size(), y_dim.size(), "Rank of first input must >= rank of second input.") - ctx.Output("Out")->Resize(x_dim); + ctx.Output("Out")->Resize(x_dim); + ctx.ShareLoD("X", /*->*/ "Out"); } }; @@ -63,11 +64,15 @@ Limited elementwise multiple operator.The equation is: Out = X ⊙ Y. 2. Y's shape is a subset of X. Y will be broadcasted to match the shape of X and axis should be dimension index Y in X. example: + shape(X) = (2, 3, 4, 5), shape(Y) = (,) shape(X) = (2, 3, 4, 5), shape(Y) = (5,) shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5) shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1 shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0 + +Both the input X and Y can carry the LoD (Level of Details) information, +or not. But the output only shares the LoD with input X. )DOC"); } }; @@ -86,10 +91,8 @@ class ElementWiseMulOpGrad : public framework::OperatorWithKernel { auto x_dims = ctx.Input("X")->dims(); auto y_dims = ctx.Input("Y")->dims(); auto out_dims = ctx.Input(framework::GradVarName("Out"))->dims(); - auto *x_grad = - ctx.Output(framework::GradVarName("X")); - auto *y_grad = - ctx.Output(framework::GradVarName("Y")); + auto *x_grad = ctx.Output(framework::GradVarName("X")); + auto *y_grad = ctx.Output(framework::GradVarName("Y")); PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(), "Rank of first input must >= rank of second input.") diff --git a/paddle/operators/fc_op.cc b/paddle/operators/fc_op.cc index e5d0f3c3724262a60a463ef3beadd9906d3ebaf6..5ac0e8cc45f007d42f1b6d7f86333f5cbedb3ea8 100644 --- a/paddle/operators/fc_op.cc +++ b/paddle/operators/fc_op.cc @@ -186,6 +186,9 @@ W_i is a 2-D matrix of size (K x N), where N means the number of neurons in the fully connected layer. B is a 1-D vector of size N. Thus, the output Out is a 2-D matrix of size (M x N). Activation type can be set to `identity` (default), `sigmoid` or `softmax`. + +All the inputs can carry the LoD (Level of Details) information, +or not. But the output only shares the LoD with first input (`X[0]`). 
)DOC"); } }; diff --git a/paddle/operators/fill_zeros_like_op.cc b/paddle/operators/fill_zeros_like_op.cc index ba7857cc65f6860a6156674c6addc2bfdce21a99..761a527a5574edc779340ec595dfe1bc1964438a 100644 --- a/paddle/operators/fill_zeros_like_op.cc +++ b/paddle/operators/fill_zeros_like_op.cc @@ -23,15 +23,14 @@ class FillZerosLikeOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - PADDLE_ENFORCE_NOT_NULL( - ctx.InputVar("Src"), - "Input(Src) of FillZerosLikeOp should not be null."); - PADDLE_ENFORCE_NOT_NULL( - ctx.OutputVar("Dst"), - "Output(Dst) of FillZerosLikeOp should not be null."); - - ctx.Output("Dst")->Resize( - ctx.Input("Src")->dims()); + PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), + "Input(X) of FillZerosLikeOp should not be null."); + PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Y"), + "Output(Y) of FillZerosLikeOp should not be null."); + + ctx.Output("Y")->Resize( + ctx.Input("X")->dims()); + ctx.ShareLoD("X", /*->*/ "Y"); } }; @@ -40,8 +39,8 @@ class FillZerosLikeOpMaker : public framework::OpProtoAndCheckerMaker { FillZerosLikeOpMaker(framework::OpProto *proto, framework::OpAttrChecker *op_checker) : framework::OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("Src", "The input of fill-zeros-like op."); - AddOutput("Dst", "The varibale will be filled up with zeros."); + AddInput("X", "The input of fill-zeros-like op."); + AddOutput("Y", "The varibale will be filled up with zeros."); AddComment(R"DOC( Fill up a vriable with zeros. diff --git a/paddle/operators/fill_zeros_like_op.h b/paddle/operators/fill_zeros_like_op.h index 969998ce2eae02b8ad057c6259703e51559bf98a..4474581784531faee1741f0b143743e31cc3788f 100644 --- a/paddle/operators/fill_zeros_like_op.h +++ b/paddle/operators/fill_zeros_like_op.h @@ -23,7 +23,7 @@ template class FillZerosLikeKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* output = context.Output("Dst"); + auto* output = context.Output("Y"); output->mutable_data(context.GetPlace()); auto t = framework::EigenVector::Flatten(*output); t.device(context.GetEigenDevice()) = t.constant(static_cast(0)); diff --git a/paddle/operators/gather_op.cc b/paddle/operators/gather_op.cc index d445b61c1657356f2cdcf1e98d756607de2bd042..fecd1ce2147a1e6f2f7928266be74ed7b647c5b9 100644 --- a/paddle/operators/gather_op.cc +++ b/paddle/operators/gather_op.cc @@ -35,7 +35,7 @@ class GatherOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_GE(batch_size, 0, "Batch size must be >0"); framework::DDim output_dims(ctx.Input("X")->dims()); output_dims[0] = batch_size; - ctx.Output("Out")->Resize(output_dims); + ctx.Output("Out")->Resize(output_dims); } }; @@ -45,7 +45,7 @@ class GatherGradOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - auto X_grad = ctx.Output(framework::GradVarName("X")); + auto X_grad = ctx.Output(framework::GradVarName("X")); auto X = ctx.Input("X"); X_grad->Resize(X->dims()); diff --git a/paddle/operators/gaussian_random_op.cc b/paddle/operators/gaussian_random_op.cc index c0e161bbc0c5486eb10408e43e6388f1b287abf8..5b7cbb5cc7bcb7e43b15363d37d7b8f2cbf0fbdc 100644 --- a/paddle/operators/gaussian_random_op.cc +++ b/paddle/operators/gaussian_random_op.cc @@ -48,7 +48,7 @@ class GaussianRandomOp : public framework::OperatorWithKernel { ctx.OutputVar("Out"), "Output(Out) of GaussianRandomOp should not be null."); - auto* tensor 
= ctx.Output("Out"); + auto* tensor = ctx.Output("Out"); auto dims = Attr>("dims"); std::vector temp; temp.reserve(dims.size()); diff --git a/paddle/operators/lookup_table_op.cc b/paddle/operators/lookup_table_op.cc index 07f6dfabca5879e3de6004e59d2e87f7fa68d66c..04ac24662e9cfec6a49cd213cb76bdebc7b730c8 100644 --- a/paddle/operators/lookup_table_op.cc +++ b/paddle/operators/lookup_table_op.cc @@ -32,9 +32,10 @@ class LookupTableOp : public framework::OperatorWithKernel { auto table_t = ctx.Input("W"); auto ids_t = ctx.Input("Ids"); - auto output_t = ctx.Output("Out"); + auto output_t = ctx.Output("Out"); output_t->Resize({ids_t->dims()[0], table_t->dims()[1]}); + ctx.ShareLoD("Ids", /*->*/ "Out"); } }; @@ -50,9 +51,13 @@ class LookupTableOpMaker : public framework::OpProtoAndCheckerMaker { "An input with type int32 or int64" "contains the ids to be looked up in W."); AddOutput("Out", "The lookup results, which have the same type with W."); - AddComment( - "This operator is used to perform lookups on the parameter W," - "then concatenated into a dense tensor."); + AddComment(R"DOC( +This operator is used to perform lookups on the parameter W, +then concatenated into a dense tensor. + +The input `Ids` can carry the LoD (Level of Details) information, +or not. And the output only shares the LoD with input `Ids`. +)DOC"); } }; @@ -64,7 +69,7 @@ class LookupTableOpGrad : public framework::OperatorWithKernel { void InferShape(const framework::InferShapeContext &context) const override { auto table = context.Input("W"); auto d_table = - context.Output(framework::GradVarName("W")); + context.Output(framework::GradVarName("W")); d_table->Resize(table->dims()); } }; diff --git a/paddle/operators/mean_op.cc b/paddle/operators/mean_op.cc index 7d7eeb59a23435036dc33c1e4fe6dd1c4a1a2f62..b04384bda81b93f5db0be3206eee10ad5e854540 100644 --- a/paddle/operators/mean_op.cc +++ b/paddle/operators/mean_op.cc @@ -27,7 +27,7 @@ class MeanOp : public framework::OperatorWithKernel { "Input(X) of MeanOp should not be null."); PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"), "Output(Out) of MeanOp should not be null."); - ctx.Output("Out")->Resize({1}); + ctx.Output("Out")->Resize({1}); } }; @@ -37,7 +37,8 @@ class MeanOpMaker : public framework::OpProtoAndCheckerMaker { : OpProtoAndCheckerMaker(proto, op_checker) { AddInput("X", "The input of mean op"); AddOutput("Out", "The output of mean op").NotInGradient(); - AddComment("Mean Operator"); + AddComment(R"DOC( Mean Operator +)DOC"); } }; @@ -47,7 +48,7 @@ class MeanGradOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { - ctx.Output(framework::GradVarName("X")) + ctx.Output(framework::GradVarName("X")) ->Resize(ctx.Input("X")->dims()); } }; diff --git a/paddle/operators/minus_op.cc b/paddle/operators/minus_op.cc index a97bbecdca1779df330d1053cf359bb658aa75c2..29cb85489bd05f6c1e7143d962eac0af26e75825 100644 --- a/paddle/operators/minus_op.cc +++ b/paddle/operators/minus_op.cc @@ -40,7 +40,8 @@ class MinusOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ( left_tensor->numel(), right_tensor->numel(), "Minus operator must take two tensor with same num of elements"); - ctx.Output("Out")->Resize(left_tensor->dims()); + ctx.Output("Out")->Resize(left_tensor->dims()); + ctx.ShareLoD("X", /*->*/ "Out"); } }; @@ -54,7 +55,12 @@ class MinusOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC(Minus Operator -Equation: Out = X - Y +Equation: + + Out = X - Y + +Both the input 
diff --git a/paddle/operators/modified_huber_loss_op.cc b/paddle/operators/modified_huber_loss_op.cc
index 6fe018f9a8fd74479a2feed07379c3179b7c72bd..8606c0d1e1bf7a52299528d30af0367d9f93edd2 100644
--- a/paddle/operators/modified_huber_loss_op.cc
+++ b/paddle/operators/modified_huber_loss_op.cc
@@ -34,8 +34,8 @@ class ModifiedHuberLossOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(x->dims().size(), 2, "The tensor rank of X must be 2.");
     PADDLE_ENFORCE_EQ(x->dims()[1], 1, "The 2nd dimension of X must be 1.");
 
-    context.Output<framework::LoDTensor>("IntermediateVal")->Resize(x->dims());
-    context.Output<framework::LoDTensor>("Out")->Resize({x->dims()[0], 1});
+    context.Output<framework::Tensor>("IntermediateVal")->Resize(x->dims());
+    context.Output<framework::Tensor>("Out")->Resize({x->dims()[0], 1});
   }
 };
 
@@ -81,7 +81,7 @@ class ModifiedHuberLossGradOp : public framework::OperatorWithKernel {
     auto* intermediate_val = context.Input<Tensor>("IntermediateVal");
     auto* out_grad = context.Input<Tensor>(framework::GradVarName("Out"));
     auto* x_grad =
-        context.Output<framework::LoDTensor>(framework::GradVarName("X"));
+        context.Output<framework::Tensor>(framework::GradVarName("X"));
 
     PADDLE_ENFORCE_NOT_NULL(x, "X must be initialized.");
     PADDLE_ENFORCE_NOT_NULL(y, "Y must be initialized.");
diff --git a/paddle/operators/modified_huber_loss_op.h b/paddle/operators/modified_huber_loss_op.h
index 2b2aae17084992c4935a697763ff902e455dfcbd..cb51007749e3c59572d4852959f4119ac377decc 100644
--- a/paddle/operators/modified_huber_loss_op.h
+++ b/paddle/operators/modified_huber_loss_op.h
@@ -52,8 +52,8 @@ class ModifiedHuberLossKernel : public framework::OpKernel {
   void Compute(const framework::ExecutionContext& context) const override {
     auto* in0 = context.Input<Tensor>("X");
     auto* in1 = context.Input<Tensor>("Y");
-    auto* out0 = context.Output<framework::LoDTensor>("IntermediateVal");
-    auto* out1 = context.Output<framework::LoDTensor>("Out");
+    auto* out0 = context.Output<Tensor>("IntermediateVal");
+    auto* out1 = context.Output<Tensor>("Out");
 
     out0->mutable_data<T>(context.GetPlace());
     out1->mutable_data<T>(context.GetPlace());
@@ -77,11 +77,9 @@ class ModifiedHuberLossGradCPUKernel : public framework::OpKernel {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
     auto* in0 = context.Input<Tensor>("Y");
-    auto* in1 = context.Input<framework::LoDTensor>("IntermediateVal");
-    auto* in2 =
-        context.Input<framework::LoDTensor>(framework::GradVarName("Out"));
-    auto* out0 =
-        context.Output<framework::LoDTensor>(framework::GradVarName("X"));
+    auto* in1 = context.Input<Tensor>("IntermediateVal");
+    auto* in2 = context.Input<Tensor>(framework::GradVarName("Out"));
+    auto* out0 = context.Output<Tensor>(framework::GradVarName("X"));
 
     if (out0) {
       const T* y_ptr = in0->data<T>();
diff --git a/paddle/operators/mul_op.cc b/paddle/operators/mul_op.cc
index b6d320b415e02549e85cb36ab517b0b5433887d5..7047718a3f1bf7e9598952efa1d9bcb20d5cf5b4 100644
--- a/paddle/operators/mul_op.cc
+++ b/paddle/operators/mul_op.cc
@@ -18,7 +18,6 @@ namespace paddle {
 namespace operators {
 
 using framework::Tensor;
-using framework::LoDTensor;
 
 class MulOp : public framework::OperatorWithKernel {
  public:
@@ -53,8 +52,9 @@ class MulOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(
         x_mat_dims[1], y_mat_dims[0],
         "First matrix's width must be equal with second matrix's height.");
-    ctx.Output<LoDTensor>("Out")->Resize(
+    ctx.Output<framework::Tensor>("Out")->Resize(
         {x_mat_dims[0], y_mat_dims[1]});
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
 
@@ -83,9 +83,14 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker {
         .SetDefault(1)
         .EqualGreaterThan(1);
     AddComment(R"DOC(
-Two Element Mul Operator.
+Mul operator is used to perform matrix multiplication for input X and Y.
 
-The equation is: Out = X * Y
+The equation is:
+
+    Out = X * Y
+
+Both the input `X` and `Y` can carry the LoD (Level of Details) information,
+or not. But the output only shares the LoD with input `X`.
 )DOC");
   }
 };
@@ -103,10 +108,8 @@ class MulOpGrad : public framework::OperatorWithKernel {
     auto x_dims = ctx.Input<Tensor>("X")->dims();
     auto y_dims = ctx.Input<Tensor>("Y")->dims();
     auto out_dims = ctx.Input<Tensor>(framework::GradVarName("Out"))->dims();
-    auto *x_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
-    auto *y_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("Y"));
+    auto *x_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
+    auto *y_grad = ctx.Output<framework::Tensor>(framework::GradVarName("Y"));
 
     auto x_mat_dims =
         framework::flatten_to_2d(x_dims, Attr<int>("x_num_col_dims"));
diff --git a/paddle/operators/pad_op.cc b/paddle/operators/pad_op.cc
index a0b1c6b631d97a40d774f7d2ff9550fda9c32db4..375d8a35acc0716259071c31bc332fdf5aabce1c 100644
--- a/paddle/operators/pad_op.cc
+++ b/paddle/operators/pad_op.cc
@@ -39,8 +39,13 @@ class PadOp : public framework::OperatorWithKernel {
     for (int i = 0; i < x_dim.size(); ++i) {
       out_dims[i] = x_dim[i] + paddings[i * 2] + paddings[i * 2 + 1];
     }
-    ctx.Output<framework::LoDTensor>("Out")->Resize(
+    ctx.Output<framework::Tensor>("Out")->Resize(
         framework::make_ddim(out_dims));
+    if (out_dims[0] == x_dim[0]) {
+      // Only pass LoD when the first dimension is equal between
+      // output and input.
+      ctx.ShareLoD("X", /*->*/ "Out");
+    }
   }
 };
 
@@ -101,7 +106,7 @@ class PadOpGrad : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
                             "Input(Out@GRAD) should not be null");
     auto x_dims = ctx.Input<Tensor>("X")->dims();
-    auto *x_g = ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
+    auto *x_g = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
     if (x_g != nullptr) {
       x_g->Resize(x_dims);
     }
diff --git a/paddle/operators/prelu_op.cc b/paddle/operators/prelu_op.cc
index 7ae80b296850f2f433c89d904ebf32355b2a29c7..912196c190b5ddbd4e3482a5314e949186b94368 100644
--- a/paddle/operators/prelu_op.cc
+++ b/paddle/operators/prelu_op.cc
@@ -36,8 +36,9 @@ class PReluOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
                             "Output(Out) should not be null");
-    auto *out = ctx.Output<framework::LoDTensor>("Out");
+    auto *out = ctx.Output<framework::Tensor>("Out");
     out->Resize(in->dims());
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
 
@@ -55,6 +56,8 @@ The equation is:
   f(x) = alpha * x , for x < 0
   f(x) = x         , for x >= 0
 
+The input `X` can carry the LoD (Level of Details) information,
+or not. And the output shares the LoD with input `X`.
)DOC"); } }; @@ -69,11 +72,11 @@ class PReluGradOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X"), "Input(X) must not be null."); PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), "Input(Out@GRAD) should not be null"); - auto *dx = ctx.Output(framework::GradVarName("X")); + auto *dx = ctx.Output(framework::GradVarName("X")); auto *x = ctx.Input("X"); auto *dalpha = - ctx.Output(framework::GradVarName("Alpha")); + ctx.Output(framework::GradVarName("Alpha")); auto *alpha = ctx.Input("Alpha"); dx->Resize(x->dims()); diff --git a/paddle/operators/rank_loss_op.cc b/paddle/operators/rank_loss_op.cc index 4bba4200728ebf7e7810ed935f6fdf51c96cbc7a..39af08c8751c3b95cf5fdef7395186a0176a20a2 100644 --- a/paddle/operators/rank_loss_op.cc +++ b/paddle/operators/rank_loss_op.cc @@ -40,7 +40,7 @@ class RankLossOp : public framework::OperatorWithKernel { "All inputs must have the same size"); PADDLE_ENFORCE((label_dims.size() == 2) && (label_dims[1] == 1), "All inputs must be row vector with size batch_size x 1."); - ctx.Output("Out")->Resize(label_dims); + ctx.Output("Out")->Resize(label_dims); } }; @@ -102,9 +102,9 @@ class RankLossGradOp : public framework::OperatorWithKernel { "Input(Out@GRAD) shouldn't be null."); auto dims = ctx.Input("Left")->dims(); auto *left_grad = - ctx.Output(framework::GradVarName("Left")); + ctx.Output(framework::GradVarName("Left")); auto *right_grad = - ctx.Output(framework::GradVarName("Right")); + ctx.Output(framework::GradVarName("Right")); if (left_grad) { left_grad->Resize(dims); } diff --git a/paddle/operators/rank_loss_op.h b/paddle/operators/rank_loss_op.h index 9776d123fe4b0cb0cd16a15770fcf42a966fa011..7df195ff47ecfd79388385eed4bd37b8c9b45979 100644 --- a/paddle/operators/rank_loss_op.h +++ b/paddle/operators/rank_loss_op.h @@ -24,7 +24,7 @@ template class RankLossKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { - auto* out_t = ctx.Output("Out"); + auto* out_t = ctx.Output("Out"); auto* label_t = ctx.Input("Label"); auto* left_t = ctx.Input("Left"); auto* right_t = ctx.Input("Right"); @@ -46,9 +46,9 @@ class RankLossGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { auto* d_left_t = - ctx.Output(framework::GradVarName("Left")); + ctx.Output(framework::GradVarName("Left")); auto* d_right_t = - ctx.Output(framework::GradVarName("Right")); + ctx.Output(framework::GradVarName("Right")); auto* d_out_t = ctx.Input(framework::GradVarName("Out")); auto* label_t = ctx.Input("Label"); diff --git a/paddle/operators/reshape_op.cc b/paddle/operators/reshape_op.cc index 0d05e344148c68f5625dd819ec59c5991892e4ce..ddb93007e21e4d1ae4be3650019c8bc6a680252d 100644 --- a/paddle/operators/reshape_op.cc +++ b/paddle/operators/reshape_op.cc @@ -50,7 +50,12 @@ class ReshapeOp : public framework::OperatorWithKernel { std::transform(shape.begin(), shape.end(), shape_int64.begin(), [](int a) { return static_cast(a); }); auto out_dims = framework::make_ddim(shape_int64); - ctx.Output("Out")->Resize(out_dims); + ctx.Output("Out")->Resize(out_dims); + if (shape[0] == in->dims()[0]) { + // Only pass LoD when the first dimension is equal between + // output and input. 
+ ctx.ShareLoD("X", /*->*/ "Out"); + } } }; @@ -94,7 +99,7 @@ class ReshapeGradOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), "Input(Out@GRAD) shouldn't be null."); auto dims = ctx.Input("X")->dims(); - auto *d_in = ctx.Output(framework::GradVarName("X")); + auto *d_in = ctx.Output(framework::GradVarName("X")); d_in->Resize(dims); } }; diff --git a/paddle/operators/rowwise_add_op.cc b/paddle/operators/rowwise_add_op.cc index 2a3fd3be941d91aaa6b014df91d3025f07767577..fc3ad721f210213491617452141dfa8834b067c0 100644 --- a/paddle/operators/rowwise_add_op.cc +++ b/paddle/operators/rowwise_add_op.cc @@ -44,7 +44,8 @@ class RowwiseAddOp : public framework::OperatorWithKernel { framework::slice_ddim(x_dims, num_col_dims, x_dims.size()), b_dims, "The width of two operands must be same"); PADDLE_ENFORCE_EQ(ctx.OutputSize("Out"), 1, "The output size must be 1"); - ctx.Output("Out")->Resize(x_dims); + ctx.Output("Out")->Resize(x_dims); + ctx.ShareLoD("X", /*->*/ "Out"); } }; @@ -83,8 +84,8 @@ class RowwiseAddGradOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ( framework::slice_ddim(x_dims, num_col_dims, x_dims.size()), b_dims, "The width of two operands must be same"); - auto *dx = ctx.Output(framework::GradVarName("X")); - auto *db = ctx.Output(framework::GradVarName("b")); + auto *dx = ctx.Output(framework::GradVarName("X")); + auto *db = ctx.Output(framework::GradVarName("b")); if (dx) dx->Resize(x_dims); if (db) db->Resize(b_dims); } diff --git a/paddle/operators/scale_op.cc b/paddle/operators/scale_op.cc index d1f42e8662537d35e17429f9d436fdc0e5a1dc11..1ae77a9722ef1a5548a6c4100c32fdddcee8c5cd 100644 --- a/paddle/operators/scale_op.cc +++ b/paddle/operators/scale_op.cc @@ -33,8 +33,9 @@ class ScaleOp : public framework::OperatorWithKernel { "Output(Out) of ScaleOp should not be null."); auto *in = ctx.Input("X"); - auto *out = ctx.Output("Out"); + auto *out = ctx.Output("Out"); out->Resize(in->dims()); + ctx.ShareLoD("X", /*->*/ "Out"); } }; diff --git a/paddle/operators/scatter_op.cc b/paddle/operators/scatter_op.cc index 8820262732327306f4f807702751708bd1e2aa36..3f02081a060281dec533c02b346f0667da28b8c3 100644 --- a/paddle/operators/scatter_op.cc +++ b/paddle/operators/scatter_op.cc @@ -44,7 +44,7 @@ class ScatterOp : public framework::OperatorWithKernel { framework::DDim data_dim(ctx.Input("Updates")->dims()); for (int i = 1; i < data_dim.size(); ++i) PADDLE_ENFORCE_EQ(data_dim[i], ctx.Input("Updates")->dims()[i]); - ctx.Output("Out")->Resize( + ctx.Output("Out")->Resize( ctx.Input("Ref")->dims()); } }; @@ -56,10 +56,9 @@ class ScatterGradOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { auto *dUpdates = - ctx.Output(framework::GradVarName("Updates")); + ctx.Output(framework::GradVarName("Updates")); auto *Updates = ctx.Input("Updates"); - auto *dRef = - ctx.Output(framework::GradVarName("Ref")); + auto *dRef = ctx.Output(framework::GradVarName("Ref")); auto *Ref = ctx.Input("Ref"); dRef->Resize(Ref->dims()); diff --git a/paddle/operators/sgd_op.cc b/paddle/operators/sgd_op.cc index 1232e64c7f0132b9ea19b3d7e1ebe9531e1e25a5..b063e2427217f20eb89f7cd1af0354ad0e400feb 100644 --- a/paddle/operators/sgd_op.cc +++ b/paddle/operators/sgd_op.cc @@ -33,7 +33,7 @@ class SGDOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(ctx.Input("param")->dims(), ctx.Input("grad")->dims(), "Two input of SGD Op's dimension must be same."); - 
ctx.Output("param_out") + ctx.Output("param_out") ->Resize(ctx.Input("param")->dims()); } }; diff --git a/paddle/operators/smooth_l1_loss_op.cc b/paddle/operators/smooth_l1_loss_op.cc index 9ee6fff8db6a285a0314431e4e13b284c78c8a70..ae6d1c80b300690b070024d6266a1b99bf2ef04f 100644 --- a/paddle/operators/smooth_l1_loss_op.cc +++ b/paddle/operators/smooth_l1_loss_op.cc @@ -44,8 +44,8 @@ class SmoothL1LossOp : public framework::OperatorWithKernel { "The shape of OutsideWeight must be same as X."); } - auto* diff = ctx.Output("Diff"); - auto* out = ctx.Output("Out"); + auto* diff = ctx.Output("Diff"); + auto* out = ctx.Output("Out"); diff->Resize(x->dims()); // loss is a two-rank tensor out->Resize({x->dims()[0], 1}); @@ -103,10 +103,8 @@ class SmoothL1LossGradOp : public framework::OperatorWithKernel { auto in_dims = ctx.Input("X")->dims(); auto out_dims = ctx.Input(framework::GradVarName("Out"))->dims(); - auto* x_grad = - ctx.Output(framework::GradVarName("X")); - auto* y_grad = - ctx.Output(framework::GradVarName("Y")); + auto* x_grad = ctx.Output(framework::GradVarName("X")); + auto* y_grad = ctx.Output(framework::GradVarName("Y")); PADDLE_ENFORCE_GE(out_dims.size(), 2, "The tensor rank of Input(Out@Grad) should be 2."); diff --git a/paddle/operators/softmax_op.cc b/paddle/operators/softmax_op.cc index c67eb028c882ed82ca4e6a4dd70cdea9f69cdc24..e15cfe485016552971924a40a172e74a90629dce 100644 --- a/paddle/operators/softmax_op.cc +++ b/paddle/operators/softmax_op.cc @@ -30,8 +30,7 @@ class SoftmaxOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(ctx.Input("X")->dims().size() == 2UL, "The input of softmax op must be a matrix."); - ctx.Output("Y")->Resize( - ctx.Input("X")->dims()); + ctx.Output("Y")->Resize(ctx.Input("X")->dims()); } }; @@ -77,7 +76,7 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel { ctx.Input(framework::GradVarName("Y"))->dims(), "Input(Y) and its gradients should have a same shape."); - ctx.Output(framework::GradVarName("X")) + ctx.Output(framework::GradVarName("X")) ->Resize(ctx.Input("X")->dims()); } }; diff --git a/paddle/operators/split_op.cc b/paddle/operators/split_op.cc index 61296f5c8122fdce7083e9a91dc313482875c805..a9d35b4fb79ae83379552ae2c2b4d694bd8f86dd 100644 --- a/paddle/operators/split_op.cc +++ b/paddle/operators/split_op.cc @@ -27,7 +27,7 @@ class SplitOp : public framework::OperatorWithKernel { void InferShape(const framework::InferShapeContext &ctx) const override { // infershape auto *in = ctx.Input("X"); - auto outs = ctx.MultiOutput("Out"); + auto outs = ctx.MultiOutput("Out"); size_t axis = static_cast(ctx.Attr("axis")); size_t num = static_cast(ctx.Attr("num")); std::vector sections = diff --git a/paddle/operators/squared_l2_distance_op.cc b/paddle/operators/squared_l2_distance_op.cc index 39f4305877de20d451bc35fe698a0eabf9758d57..33a564b05b1b490c6d23b7d17cef45b7740dfa39 100644 --- a/paddle/operators/squared_l2_distance_op.cc +++ b/paddle/operators/squared_l2_distance_op.cc @@ -54,9 +54,10 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel { "First dimension of target must be equal to input " "or to 1."); - ctx.Output("sub_result") + ctx.Output("sub_result") ->Resize({x_dims[0], x->numel() / x_dims[0]}); - ctx.Output("Out")->Resize({x_dims[0], 1}); + ctx.Output("Out")->Resize({x_dims[0], 1}); + ctx.ShareLoD("X", /*->*/ "Out"); } }; @@ -79,6 +80,9 @@ class SquaredL2DistanceOpMaker : public framework::OpProtoAndCheckerMaker { input or to 1. 
     If the first dimension of target is 1, SquaredL2DistanceOp will broadcast
     target's first dimension to input's first dimension. You can decide
     whether calculate the gradient of input and target.
+
+    Both the input X and Y can carry the LoD (Level of Details) information,
+    or not. But the output only shares the LoD with input X.
 )DOC");
   }
 };
 
@@ -100,10 +104,8 @@ class SquaredL2DistanceGradOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(out_dims[1], 1,
                       "Second dimension of output gradient "
                       "must be 1.");
-    auto* x_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("X"));
-    auto* y_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("Y"));
+    auto* x_grad = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
+    auto* y_grad = ctx.Output<framework::Tensor>(framework::GradVarName("Y"));
     if (x_grad) x_grad->Resize(x_dims);
     if (y_grad) y_grad->Resize(y_dims);
   }
diff --git a/paddle/operators/sum_op.cc b/paddle/operators/sum_op.cc
index 41e05c27f9029b2664685d3979fadcfd2bf6dbce..437fc262f359525045a4d772ee2c204ef571caa7 100644
--- a/paddle/operators/sum_op.cc
+++ b/paddle/operators/sum_op.cc
@@ -28,7 +28,7 @@ class SumOp : public framework::OperatorWithKernel {
                             "Output(Out) of SumOp should not be null.");
 
     auto ins = ctx.MultiInput<framework::Tensor>("X");
-    auto *out = ctx.Output<framework::LoDTensor>("Out");
+    auto *out = ctx.Output<framework::Tensor>("Out");
     int N = ins.size();
 
     auto in_dim = ins[0]->dims();
@@ -39,6 +39,7 @@ class SumOp : public framework::OperatorWithKernel {
       PADDLE_ENFORCE(in_dim == dim, "Input tensors must have same shape");
     }
     out->Resize(in_dim);
+    ctx.ShareLoD("X", /*->*/ "Out");
   }
 };
 
@@ -49,8 +50,11 @@ class SumOpMaker : public framework::OpProtoAndCheckerMaker {
     AddInput("X", "the input tensors of sum operator.").AsDuplicable();
     AddOutput("Out", "the output tensor of sum operator.");
     AddComment(R"DOC(
-  Sum the input tensors.
-  )DOC");
+Sum the input tensors.
+
+All the inputs can carry the LoD (Level of Details) information,
+or not. But the output only shares the LoD with the first input.
+)DOC"); } }; @@ -61,7 +65,7 @@ class SumGradOp : public framework::OperatorWithKernel { protected: void InferShape(const framework::InferShapeContext &ctx) const override { auto outputs = - ctx.MultiOutput(framework::GradVarName("X")); + ctx.MultiOutput(framework::GradVarName("X")); auto dims = ctx.Input(framework::GradVarName("Out"))->dims(); for (auto output : outputs) { output->Resize(dims); diff --git a/paddle/operators/top_k_op.cc b/paddle/operators/top_k_op.cc index 169b815feffd86f9ff04c129ccc997230ce03a8c..a6e43964e9825cd1ced9e7c1bc8d691422248fee 100644 --- a/paddle/operators/top_k_op.cc +++ b/paddle/operators/top_k_op.cc @@ -40,8 +40,8 @@ class TopkOp : public framework::OperatorWithKernel { framework::DDim dims = input->dims(); dims[dims.size() - 1] = k; - ctx.Output("Out")->Resize(dims); - ctx.Output("Indices")->Resize(dims); + ctx.Output("Out")->Resize(dims); + ctx.Output("Indices")->Resize(dims); } }; diff --git a/paddle/operators/transpose_op.cc b/paddle/operators/transpose_op.cc index babf2f561c31d5436fe1611c576e6e7fc04401db..017a05326e9b397185d7c3530891884b11784783 100644 --- a/paddle/operators/transpose_op.cc +++ b/paddle/operators/transpose_op.cc @@ -51,7 +51,7 @@ class TransposeOp : public framework::OperatorWithKernel { for (size_t i = 0; i < axis_size; i++) { out_dims[i] = x_dims[axis[i]]; } - ctx.Output("Out")->Resize(out_dims); + ctx.Output("Out")->Resize(out_dims); } }; @@ -99,8 +99,7 @@ class TransposeOpGrad : public framework::OperatorWithKernel { PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")), "Input(Out@GRAD) should not be null"); auto x_dims = ctx.Input("X")->dims(); - auto *x_grad = - ctx.Output(framework::GradVarName("X")); + auto *x_grad = ctx.Output(framework::GradVarName("X")); if (x_grad) x_grad->Resize(x_dims); } diff --git a/paddle/operators/uniform_random_op.cc b/paddle/operators/uniform_random_op.cc index 184bcbc29c0d26a214345506f126f9cc0d406b07..17ea48361bc597ccfeb80884d51900e6567aa057 100644 --- a/paddle/operators/uniform_random_op.cc +++ b/paddle/operators/uniform_random_op.cc @@ -54,7 +54,7 @@ class UniformRandomOp : public framework::OperatorWithKernel { PADDLE_ENFORCE(Attr("min") < Attr("max"), "uniform_random's min must less then max"); - auto* tensor = ctx.Output("Out"); + auto* tensor = ctx.Output("Out"); auto dims = Attr>("dims"); std::vector temp; temp.reserve(dims.size()); diff --git a/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py b/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py index 2473daaba24438819f3f55ccc40fe1c64ee59960..eff8fa87d9c0dafc6935604101e94ee6c8b081ce 100644 --- a/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py +++ b/python/paddle/v2/framework/tests/test_fill_zeros_like_op.py @@ -6,8 +6,8 @@ from op_test import OpTest class TestFillZerosLikeOp(OpTest): def setUp(self): self.op_type = "fill_zeros_like" - self.inputs = {'Src': np.random.random((219, 232)).astype("float32")} - self.outputs = {'Dst': np.zeros_like(self.inputs["Src"])} + self.inputs = {'X': np.random.random((219, 232)).astype("float32")} + self.outputs = {'Y': np.zeros_like(self.inputs["X"])} def test_check_output(self): self.check_output()